feat: a series of new features
@@ -1,13 +0,0 @@
import pathlib
from typing import Protocol

from heurams.services.logger import get_logger

logger = get_logger(__name__)


class PlayFunctionProtocol(Protocol):
    def __call__(self, path: pathlib.Path) -> None: ...


logger.debug("Audio protocol module loaded")
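The file removed above defined a structural interface for audio playback callbacks: any callable that takes a pathlib.Path and returns None satisfies PlayFunctionProtocol. A hypothetical conforming function, for illustration only:

import pathlib

def play_with_system_player(path: pathlib.Path) -> None:
    # Matches PlayFunctionProtocol.__call__ by signature (structural typing),
    # no explicit inheritance needed; the body here is only a placeholder.
    print(f"playing {path}")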
@@ -1,6 +1,19 @@
# Large language models
from heurams.services.logger import get_logger

from .base import BaseLLM
from .openai import OpenAILLM

logger = get_logger(__name__)

logger.debug("LLM providers module loaded")
__all__ = [
    "BaseLLM",
    "OpenAILLM",
]

providers = {
    "base": BaseLLM,
    "openai": OpenAILLM,
}

logger.debug("LLM providers registered: %s", list(providers.keys()))
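A minimal sketch of how the registry above can be used to select a provider by name; the import path is an assumption (the diff does not show file paths), and the config keys follow the OpenAILLM constructor further below:

# Assumed import path for the providers package in this hunk.
from heurams.llm import providers

provider_cls = providers["openai"]  # look up a provider class by name
llm = provider_cls({"key": "sk-...", "url": "https://api.openai.com/v1"})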
@@ -1,5 +1,55 @@
"""Base class for LLM providers"""

import asyncio
from typing import Any, Dict, List, Optional

from heurams.services.logger import get_logger

logger = get_logger(__name__)

logger.debug("LLM base class module loaded")

class BaseLLM:
    """Base class for LLM providers"""

    name = "BaseLLM"

    def __init__(self, config: Dict[str, Any]):
        """Initialize the LLM provider

        Args:
            config: provider configuration dictionary
        """
        self.config = config
        logger.debug("BaseLLM initialized")

    async def chat(self, messages: List[Dict[str, str]], **kwargs) -> str:
        """Send chat messages and get a response

        Args:
            messages: list of messages, each of the form {"role": "user"|"assistant"|"system", "content": "message text"}
            **kwargs: additional parameters such as temperature, max_tokens, etc.

        Returns:
            The text response returned by the model
        """
        logger.debug("BaseLLM.chat: messages=%d, kwargs=%s", len(messages), kwargs)
        logger.warning("BaseLLM.chat is a base-class method with no concrete implementation")
        await asyncio.sleep(0)  # avoid the unused-async warning
        return "BaseLLM has no concrete implementation"

    async def chat_stream(self, messages: List[Dict[str, str]], **kwargs):
        """Streaming chat (optional to implement)

        Args:
            messages: list of messages
            **kwargs: additional parameters

        Yields:
            Text chunks of the streaming response
        """
        logger.debug(
            "BaseLLM.chat_stream: messages=%d, kwargs=%s", len(messages), kwargs
        )
        logger.warning("BaseLLM.chat_stream is a base-class method with no concrete implementation")
        await asyncio.sleep(0)
        yield "BaseLLM has no streaming implementation"
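BaseLLM is effectively an interface: concrete providers override chat and, optionally, chat_stream. A toy subclass sketch (not part of the commit) to illustrate the extension point:

class EchoLLM(BaseLLM):
    """Hypothetical provider that echoes the last user message."""

    name = "Echo"

    async def chat(self, messages, **kwargs) -> str:
        # No real model call; simply return the content of the last message.
        return messages[-1]["content"] if messages else ""

Since chat_stream is not overridden, the inherited base implementation yields its placeholder string.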
@@ -1,5 +1,96 @@
"""OpenAI-compatible LLM provider"""

import asyncio
from typing import Any, AsyncGenerator, Dict, List, Optional

from heurams.services.logger import get_logger

from .base import BaseLLM

logger = get_logger(__name__)

logger.debug("OpenAI provider module loaded")

class OpenAILLM(BaseLLM):
    """OpenAI-compatible LLM provider"""

    name = "OpenAI"

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        self.api_key = config.get("key", "")
        self.base_url = config.get("url", "https://api.openai.com/v1")
        self._client = None
        logger.debug("OpenAILLM initialized: base_url=%s", self.base_url)

    def _get_client(self):
        """Get the OpenAI client (lazy import)"""
        if self._client is None:
            try:
                from openai import AsyncOpenAI
            except ImportError:
                logger.error("The openai package is not installed; run: pip install openai")
                raise ImportError("The openai package is not installed; run: pip install openai")

            self._client = AsyncOpenAI(
                api_key=self.api_key if self.api_key else None,
                base_url=self.base_url if self.base_url else None,
            )
        return self._client

    async def chat(self, messages: List[Dict[str, str]], **kwargs) -> str:
        """Send chat messages and get a response"""
        logger.debug("OpenAILLM.chat: messages=%d", len(messages))

        client = self._get_client()

        # Default parameters
        default_kwargs = {
            "model": kwargs.get("model", "gpt-3.5-turbo"),
            "temperature": kwargs.get("temperature", 0.7),
            "max_tokens": kwargs.get("max_tokens", 1000),
        }

        # Merge parameters; values passed via kwargs take precedence
        request_kwargs = {**default_kwargs, **kwargs}
        request_kwargs["messages"] = messages

        try:
            response = await client.chat.completions.create(**request_kwargs)
            content = response.choices[0].message.content
            logger.debug(
                "OpenAILLM.chat succeeded: response length=%d",
                len(content) if content else 0,
            )
            return content or ""
        except Exception as e:
            logger.error("OpenAILLM.chat failed: %s", e)
            raise

    async def chat_stream(
        self, messages: List[Dict[str, str]], **kwargs
    ) -> AsyncGenerator[str, None]:
        """Streaming chat"""
        logger.debug("OpenAILLM.chat_stream: messages=%d", len(messages))

        client = self._get_client()

        # Default parameters
        default_kwargs = {
            "model": kwargs.get("model", "gpt-3.5-turbo"),
            "temperature": kwargs.get("temperature", 0.7),
            "max_tokens": kwargs.get("max_tokens", 1000),
            "stream": True,
        }

        # Merge parameters
        request_kwargs = {**default_kwargs, **kwargs}
        request_kwargs["messages"] = messages

        try:
            stream = await client.chat.completions.create(**request_kwargs)
            async for chunk in stream:
                if chunk.choices[0].delta.content:
                    yield chunk.choices[0].delta.content
        except Exception as e:
            logger.error("OpenAILLM.chat_stream failed: %s", e)
            raise
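A rough end-to-end usage sketch for the provider above; the import path and the API key are placeholders, and the openai package must be installed:

import asyncio

from heurams.llm.openai import OpenAILLM  # assumed module path; not shown in the diff

async def main() -> None:
    llm = OpenAILLM({"key": "sk-...", "url": "https://api.openai.com/v1"})
    reply = await llm.chat(
        [{"role": "user", "content": "Hello"}],
        model="gpt-3.5-turbo",
    )
    print(reply)

asyncio.run(main())

Streaming is consumed the same way, with async for chunk in llm.chat_stream([...]) yielding text chunks as they arrive.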