使用 ChatLiteLLM() - Langchain
先决条件
!pip install litellm langchain
快速入门
- OpenAI
- Anthropic
- Replicate
- Cohere
import os

from langchain_community.chat_models import ChatLiteLLM
from langchain_core.messages import HumanMessage

# Provide your OpenAI key before constructing the model.
os.environ['OPENAI_API_KEY'] = ""

# ChatLiteLLM routes this request through LiteLLM to OpenAI.
chat = ChatLiteLLM(model="gpt-3.5-turbo")

messages = [
    HumanMessage(content="what model are you")
]
chat.invoke(messages)
import os

from langchain_community.chat_models import ChatLiteLLM
from langchain_core.messages import HumanMessage

# Provide your Anthropic key before constructing the model.
os.environ['ANTHROPIC_API_KEY'] = ""

# ChatLiteLLM routes this request through LiteLLM to Anthropic.
chat = ChatLiteLLM(model="claude-2", temperature=0.3)

messages = [
    HumanMessage(content="what model are you")
]
chat.invoke(messages)
import os

from langchain_community.chat_models import ChatLiteLLM
from langchain_core.messages import HumanMessage

# Provide your Replicate token before constructing the model.
os.environ['REPLICATE_API_TOKEN'] = ""

# Replicate models are addressed as "replicate/<model>:<version-hash>".
chat = ChatLiteLLM(model="replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1")

messages = [
    HumanMessage(content="what model are you?")
]
chat.invoke(messages)
import os

from langchain_community.chat_models import ChatLiteLLM
from langchain_core.messages import HumanMessage

# Provide your Cohere key before constructing the model.
os.environ['COHERE_API_KEY'] = ""

# ChatLiteLLM routes this request through LiteLLM to Cohere.
chat = ChatLiteLLM(model="command-nightly")

messages = [
    HumanMessage(content="what model are you?")
]
chat.invoke(messages)
将 Langchain ChatLiteLLM 与 MLflow 结合使用
MLflow 为 ChatLiteLLM 提供开源可观测性解决方案。
要启用集成,只需在代码中调用 mlflow.litellm.autolog()
即可。无需其他设置。
# Enable MLflow auto-tracing for all LiteLLM calls; no other setup is needed.
import mlflow
mlflow.litellm.autolog()
启用自动追踪后,您可以调用 ChatLiteLLM
并在 MLflow 中查看记录的追踪。
import os

# Use the maintained langchain_community import path, consistent with the
# other examples in this page (plain `langchain.chat_models` is deprecated).
from langchain_community.chat_models import ChatLiteLLM

os.environ['OPENAI_API_KEY']="sk-..."

# With mlflow.litellm.autolog() enabled, this call is traced in MLflow.
chat = ChatLiteLLM(model="gpt-4o-mini")
chat.invoke("Hi!")
将 Langchain ChatLiteLLM 与 Lunary 结合使用
import os

import litellm
# Maintained import paths, consistent with the other examples in this page
# (`langchain.chat_models` / `langchain.schema` are deprecated).
from langchain_community.chat_models import ChatLiteLLM
from langchain_core.messages import HumanMessage

os.environ["LUNARY_PUBLIC_KEY"] = ""  # from https://app.lunary.ai/settings
os.environ['OPENAI_API_KEY']="sk-..."

# Report both successful and failed LiteLLM calls to Lunary.
litellm.success_callback = ["lunary"]
litellm.failure_callback = ["lunary"]

# Fixed: the original snippet was missing the closing parenthesis here,
# which made it a syntax error.
chat = ChatLiteLLM(model="gpt-4o")

messages = [
    HumanMessage(content="what model are you")
]
# .invoke() replaces the deprecated `chat(messages)` __call__ form,
# matching the other examples above.
chat.invoke(messages)
请参阅 Lunary 官方文档获取更多详情。
使用 LangChain ChatLiteLLM + Langfuse
请在此处查看此部分,了解有关如何将 Langfuse 与 ChatLiteLLM 集成的更多详情。