Redacting Logged Data
Redact messages / mask PII before sending data to logging integrations (Langfuse, etc.).
See our Presidio PII Masking guide for reference.
- Set up a custom callback
from typing import Any, List, Optional, Tuple

from litellm.integrations.custom_logger import CustomLogger

class MyCustomHandler(CustomLogger):
    async def async_logging_hook(
        self, kwargs: dict, result: Any, call_type: str
    ) -> Tuple[dict, Any]:
        """
        For masking logged request/response. Return a modified version of the request/result.

        Called before `async_log_success_event`.
        """
        if (
            call_type == "completion" or call_type == "acompletion"
        ):  # /chat/completions requests
            messages: Optional[List] = kwargs.get("messages", None)  # original messages, if you need them
            kwargs["messages"] = [{"role": "user", "content": "MASK_THIS_ASYNC_VALUE"}]
        return kwargs, result

    def logging_hook(
        self, kwargs: dict, result: Any, call_type: str
    ) -> Tuple[dict, Any]:
        """
        For masking logged request/response. Return a modified version of the request/result.

        Called before `log_success_event`.
        """
        if (
            call_type == "completion" or call_type == "acompletion"
        ):  # /chat/completions requests
            messages: Optional[List] = kwargs.get("messages", None)  # original messages, if you need them
            kwargs["messages"] = [{"role": "user", "content": "MASK_THIS_SYNC_VALUE"}]
        return kwargs, result
customHandler = MyCustomHandler()
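The placeholder strings above replace the entire message list, which is enough to prove the hook fires. In practice you would transform the content instead, e.g. run each message through a PII masker such as Presidio. Below is a minimal sketch that redacts email addresses with a regex; the `mask_pii` helper and `PIIMaskingHandler` name are illustrative assumptions, not part of LiteLLM:

import re
from typing import Any, List, Optional, Tuple

from litellm.integrations.custom_logger import CustomLogger

EMAIL_RE = re.compile(r"[\w.+-]+@[\w-]+\.[\w.-]+")

def mask_pii(text: str) -> str:
    # Illustrative only: swap in a real masker (e.g. Presidio) here.
    return EMAIL_RE.sub("<REDACTED_EMAIL>", text)

class PIIMaskingHandler(CustomLogger):
    async def async_logging_hook(
        self, kwargs: dict, result: Any, call_type: str
    ) -> Tuple[dict, Any]:
        if call_type in ("completion", "acompletion"):
            messages: Optional[List] = kwargs.get("messages", None)
            if messages:
                # Mask string contents in place; leave non-string content untouched.
                kwargs["messages"] = [
                    {**m, "content": mask_pii(m["content"])}
                    if isinstance(m.get("content"), str)
                    else m
                    for m in messages
                ]
        return kwargs, result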
- Connect the custom handler to LiteLLM
import litellm
litellm.callbacks = [customHandler]
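Note that `litellm.callbacks` is a list, so the masking handler can sit alongside other callbacks; the test below also layers Langfuse on top via `litellm.success_callback`.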
- Test it!
# pip install langfuse 
import os
import litellm
from litellm import completion, acompletion
os.environ["LANGFUSE_PUBLIC_KEY"] = ""
os.environ["LANGFUSE_SECRET_KEY"] = ""
# os.environ["LANGFUSE_HOST"] = ""  # optional, defaults to https://cloud.langfuse.com

# LLM API Keys
os.environ["OPENAI_API_KEY"] = ""
litellm.callbacks = [customHandler]
litellm.success_callback = ["langfuse"]
## sync 
response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
    stream=True,
)
for chunk in response:
    continue
## async
import asyncio

async def run_acompletion():
    response = await acompletion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
        stream=True,
    )
    async for chunk in response:
        continue

asyncio.run(run_acompletion())
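If the hooks are wired up correctly, the traces recorded in Langfuse should show the request content replaced with MASK_THIS_SYNC_VALUE / MASK_THIS_ASYNC_VALUE instead of the original prompt.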