init
providers/__init__.py (new file, 0 lines)
providers/base_provider.py (new file, 73 lines)
@@ -0,0 +1,73 @@
from abc import ABC, abstractmethod
from typing import Optional, List, Dict, Any, Tuple

from sqlalchemy.orm import Session


class LLMProvider(ABC):
    """
    Abstract base class for LLM providers
    """

    @abstractmethod
    def __init__(self, db: Session, api_key: Optional[str] = None):
        """
        Initialize the provider with a database session and optional API key
        """
        pass

    @abstractmethod
    async def generate_response(self, model: str, prompt: str, max_tokens: Optional[int] = None) -> str:
        """
        Generate a response from the LLM
        """
        pass

    def supports_tools(self) -> bool:
        """
        Whether this provider supports function/tool calling.
        Override in subclasses that support it.
        """
        return False

    async def generate_response_with_tools(
        self,
        model: str,
        prompt: str,
        tools: List[Dict[str, Any]],
        max_tokens: Optional[int] = None
    ) -> Tuple[str, List[Dict[str, Any]]]:
        """
        Generate a response with tool definitions available.
        Returns (text_content, tool_calls) where tool_calls is a list of
        dicts with keys: name, arguments (dict).
        If no tools are called, tool_calls is empty and text_content has the response.
        Default implementation falls back to regular generation.
        """
        text = await self.generate_response(model, prompt, max_tokens)
        return text, []


class ProviderFactory:
    """
    Factory class to create provider instances
    """

    @staticmethod
    def create_provider(db: Session, provider_type: str, api_key: Optional[str] = None):
        """
        Create a provider instance based on the type
        """
        # Accept either a plain string or an enum member exposing .value
        provider_name = getattr(provider_type, "value", provider_type)
        if provider_name == "openai":
            from providers.openai_provider import OpenAIProvider
            return OpenAIProvider(db, api_key)
        elif provider_name == "claude":
            from providers.claude_provider import ClaudeProvider
            return ClaudeProvider(db, api_key)
        elif provider_name == "qwen":
            from providers.qwen_provider import QwenProvider
            return QwenProvider(db, api_key)
        elif provider_name == "deepseek":
            from providers.deepseek_provider import DeepSeekProvider
            return DeepSeekProvider(db, api_key)
        else:
            raise ValueError(f"Unsupported provider type: {provider_type}")
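As an illustration of how the base class and factory fit together, here is a minimal usage sketch. It is not part of the commit; it assumes an existing SQLAlchemy session `db`, an async calling context, and a placeholder model id.

    import asyncio
    from sqlalchemy.orm import Session
    from providers.base_provider import ProviderFactory

    async def run_turn(db: Session) -> str:
        # "claude" is a plain string; "openai", "qwen" and "deepseek" are the other supported values
        provider = ProviderFactory.create_provider(db, "claude")
        model = "claude-sonnet-4-20250514"  # placeholder model id, substitute your own
        return await provider.generate_response(model, "Present the opening argument.", max_tokens=400)

    # Providers that override supports_tools() additionally expose
    # generate_response_with_tools(model, prompt, tools) -> (text, [{"name": ..., "arguments": {...}}, ...])
    # reply = asyncio.run(run_turn(db))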
providers/claude_provider.py (new file, 85 lines)
@@ -0,0 +1,85 @@
import anthropic
from typing import Optional, List, Dict, Any, Tuple
from sqlalchemy.orm import Session

from providers.base_provider import LLMProvider
from services.api_key_service import ApiKeyService

# System prompt (Chinese): "You are taking part in a structured debate. Follow the rules in the
# user message, state your argument directly, and do not repeat the prompt or the history."
SYSTEM_PROMPT = "你正在参与一场结构化辩论。请按照用户消息中的规则进行辩论,直接给出你的论点,不要重复提示词或历史记录。"


class ClaudeProvider(LLMProvider):
    """
    Anthropic Claude API provider implementation
    """

    def __init__(self, db: Session, api_key: Optional[str] = None):
        if not api_key:
            api_key = ApiKeyService.get_api_key(db, "claude")

        if api_key:
            self.client = anthropic.AsyncAnthropic(api_key=api_key)
        else:
            raise ValueError("Claude API key not found in database or provided")

    def supports_tools(self) -> bool:
        return True

    async def generate_response(self, model: str, prompt: str, max_tokens: Optional[int] = None) -> str:
        try:
            response = await self.client.messages.create(
                model=model,
                max_tokens=max_tokens or 500,
                temperature=0.7,
                system=SYSTEM_PROMPT,
                messages=[
                    {"role": "user", "content": prompt}
                ]
            )
            return response.content[0].text
        except Exception as e:
            raise Exception(f"Error calling Claude API: {str(e)}")

    async def generate_response_with_tools(
        self,
        model: str,
        prompt: str,
        tools: List[Dict[str, Any]],
        max_tokens: Optional[int] = None
    ) -> Tuple[str, List[Dict[str, Any]]]:
        try:
            # Convert OpenAI-format tools to Anthropic format
            anthropic_tools = []
            for tool in tools:
                func = tool.get("function", tool)
                anthropic_tools.append({
                    "name": func["name"],
                    "description": func.get("description", ""),
                    "input_schema": func.get("parameters", func.get("input_schema", {}))
                })

            response = await self.client.messages.create(
                model=model,
                max_tokens=max_tokens or 500,
                temperature=0.7,
                system=SYSTEM_PROMPT,
                messages=[
                    {"role": "user", "content": prompt}
                ],
                tools=anthropic_tools
            )

            text_content = ""
            tool_calls = []
            for block in response.content:
                if block.type == "text":
                    text_content += block.text
                elif block.type == "tool_use":
                    tool_calls.append({
                        "name": block.name,
                        "arguments": block.input
                    })

            return text_content.strip(), tool_calls
        except Exception as e:
            raise Exception(f"Error calling Claude API with tools: {str(e)}")
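For reference, a sketch of what the conversion loop in generate_response_with_tools produces. The tool name and schema below are invented for illustration only.

    openai_style_tool = {
        "type": "function",
        "function": {
            "name": "lookup_source",                  # hypothetical tool
            "description": "Look up a cited source",
            "parameters": {
                "type": "object",
                "properties": {"query": {"type": "string"}},
                "required": ["query"],
            },
        },
    }

    # After the loop, the entry appended to anthropic_tools is:
    anthropic_tool = {
        "name": "lookup_source",
        "description": "Look up a cited source",
        "input_schema": {
            "type": "object",
            "properties": {"query": {"type": "string"}},
            "required": ["query"],
        },
    }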
providers/deepseek_provider.py (new file, 64 lines)
@@ -0,0 +1,64 @@
from typing import Optional
from sqlalchemy.orm import Session
from openai import AsyncOpenAI

from providers.base_provider import LLMProvider
from services.api_key_service import ApiKeyService


class DeepSeekProvider(LLMProvider):
    """
    DeepSeek API provider implementation using OpenAI-compatible API
    """

    def __init__(self, db: Session, api_key: Optional[str] = None):
        if not api_key:
            api_key = ApiKeyService.get_api_key(db, "deepseek")

        if not api_key:
            raise ValueError("DeepSeek API key not found in database or provided")

        self.client = AsyncOpenAI(
            api_key=api_key,
            base_url="https://api.deepseek.com"
        )

    def supports_tools(self) -> bool:
        return False

    async def generate_response(self, model: str, prompt: str, max_tokens: Optional[int] = None) -> str:
        try:
            is_reasoner = "reasoner" in model or "r1" in model.lower()

            messages = [{"role": "user", "content": prompt}]
            # deepseek-reasoner does not support a system message, so for reasoner
            # models the instructions stay inside the user message
            if not is_reasoner:
                messages.insert(0, {
                    "role": "system",
                    # Chinese system prompt: structured-debate instructions (answer directly,
                    # do not repeat the prompt or the history)
                    "content": "你正在参与一场结构化辩论。请按照用户消息中的规则进行辩论,直接给出你的论点,不要重复提示词或历史记录。"
                })

            kwargs = {
                "model": model,
                "messages": messages,
            }
            # reasoner models do not support max_tokens; use max_completion_tokens instead
            if is_reasoner:
                kwargs["max_completion_tokens"] = max_tokens or 4096
            else:
                kwargs["max_tokens"] = max_tokens or 500

            response = await self.client.chat.completions.create(**kwargs)

            message = response.choices[0].message
            content = message.content or ""

            # deepseek-reasoner may return its main output in reasoning_content
            if not content.strip() and is_reasoner:
                reasoning = getattr(message, "reasoning_content", None)
                if reasoning:
                    content = reasoning

            return content.strip()
        except Exception as e:
            raise Exception(f"Error calling DeepSeek API: {str(e)}")
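A minimal sketch of driving the two branches above, assuming an existing session `db` and a DeepSeek key already stored via ApiKeyService. The model ids are the documented DeepSeek names, but treat them as placeholders.

    import asyncio
    from sqlalchemy.orm import Session
    from providers.deepseek_provider import DeepSeekProvider

    async def demo(db: Session) -> None:
        provider = DeepSeekProvider(db)
        # "deepseek-chat": system message + max_tokens branch
        chat_reply = await provider.generate_response("deepseek-chat", "Present the opening argument.")
        # "deepseek-reasoner": no system message, max_completion_tokens,
        # and a fallback to reasoning_content if content comes back empty
        reasoner_reply = await provider.generate_response("deepseek-reasoner", "Present the opening argument.", max_tokens=2048)
        print(chat_reply, reasoner_reply, sep="\n---\n")

    # asyncio.run(demo(db))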
providers/openai_provider.py (new file, 72 lines)
@@ -0,0 +1,72 @@
import json
import openai
from typing import Optional, List, Dict, Any, Tuple
from sqlalchemy.orm import Session

from providers.base_provider import LLMProvider
from services.api_key_service import ApiKeyService

# System prompt (Chinese): structured-debate instructions (answer directly,
# do not repeat the prompt or the history)
SYSTEM_PROMPT = "你正在参与一场结构化辩论。请按照用户消息中的规则进行辩论,直接给出你的论点,不要重复提示词或历史记录。"


class OpenAIProvider(LLMProvider):
    """
    OpenAI API provider implementation
    """

    def __init__(self, db: Session, api_key: Optional[str] = None):
        if not api_key:
            api_key = ApiKeyService.get_api_key(db, "openai")

        if api_key:
            self.client = openai.AsyncOpenAI(api_key=api_key)
        else:
            raise ValueError("OpenAI API key not found in database or provided")

    def supports_tools(self) -> bool:
        return True

    async def generate_response(self, model: str, prompt: str, max_tokens: Optional[int] = None) -> str:
        try:
            response = await self.client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT},
                    {"role": "user", "content": prompt}
                ],
                max_tokens=max_tokens or 500
            )
            return response.choices[0].message.content.strip()
        except Exception as e:
            raise Exception(f"Error calling OpenAI API: {str(e)}")

    async def generate_response_with_tools(
        self,
        model: str,
        prompt: str,
        tools: List[Dict[str, Any]],
        max_tokens: Optional[int] = None
    ) -> Tuple[str, List[Dict[str, Any]]]:
        try:
            response = await self.client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT},
                    {"role": "user", "content": prompt}
                ],
                tools=tools,
                tool_choice="auto",
                max_tokens=max_tokens or 500
            )
            message = response.choices[0].message
            text_content = message.content or ""
            tool_calls = []
            if message.tool_calls:
                for tc in message.tool_calls:
                    tool_calls.append({
                        "name": tc.function.name,
                        "arguments": json.loads(tc.function.arguments)
                    })
            return text_content.strip(), tool_calls
        except Exception as e:
            raise Exception(f"Error calling OpenAI API with tools: {str(e)}")
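A hedged sketch of the caller side of generate_response_with_tools. The tool definition, model id, and sample return value are illustrative, not output from a real run.

    tools = [{
        "type": "function",
        "function": {
            "name": "fetch_statistic",                # hypothetical tool
            "description": "Fetch a statistic to support an argument",
            "parameters": {
                "type": "object",
                "properties": {"topic": {"type": "string"}},
                "required": ["topic"],
            },
        },
    }]

    # text, tool_calls = await provider.generate_response_with_tools("gpt-4o", prompt, tools)
    # If the model decides to call the tool, tool_calls might look like:
    #   [{"name": "fetch_statistic", "arguments": {"topic": "renewable energy adoption"}}]
    # otherwise tool_calls is [] and text holds the plain reply.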
providers/provider_factory.py (new file, 49 lines)
@@ -0,0 +1,49 @@
from abc import ABC, abstractmethod
from typing import Optional
from sqlalchemy.orm import Session


class LLMProvider(ABC):
    """
    Abstract base class for LLM providers
    """

    @abstractmethod
    def __init__(self, db: Session, api_key: Optional[str] = None):
        """
        Initialize the provider with a database session and optional API key
        """
        pass

    @abstractmethod
    async def generate_response(self, model: str, prompt: str, max_tokens: Optional[int] = None) -> str:
        """
        Generate a response from the LLM
        """
        pass


class ProviderFactory:
    """
    Factory class to create provider instances
    """

    @staticmethod
    def create_provider(db: Session, provider_type: str, api_key: Optional[str] = None):
        """
        Create a provider instance based on the type
        """
        # Accept either a plain string or an enum member exposing .value
        provider_name = getattr(provider_type, "value", provider_type)
        if provider_name == "openai":
            from providers.openai_provider import OpenAIProvider
            return OpenAIProvider(db, api_key)
        elif provider_name == "claude":
            from providers.claude_provider import ClaudeProvider
            return ClaudeProvider(db, api_key)
        elif provider_name == "qwen":
            from providers.qwen_provider import QwenProvider
            return QwenProvider(db, api_key)
        elif provider_name == "deepseek":
            from providers.deepseek_provider import DeepSeekProvider
            return DeepSeekProvider(db, api_key)
        else:
            raise ValueError(f"Unsupported provider type: {provider_type}")
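The dispatch above means a new backend is added by subclassing LLMProvider and wiring one more branch into create_provider. A rough sketch with an entirely hypothetical provider, not part of this commit:

    from typing import Optional
    from sqlalchemy.orm import Session
    from providers.base_provider import LLMProvider

    class ExampleProvider(LLMProvider):       # hypothetical
        def __init__(self, db: Session, api_key: Optional[str] = None):
            self.api_key = api_key            # real providers resolve the key via ApiKeyService

        async def generate_response(self, model: str, prompt: str, max_tokens: Optional[int] = None) -> str:
            return "stubbed reply"            # a real implementation would call the vendor SDK here

    # ...and in ProviderFactory.create_provider:
    #     elif provider_name == "example":
    #         from providers.example_provider import ExampleProvider
    #         return ExampleProvider(db, api_key)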
providers/qwen_provider.py (new file, 75 lines)
@@ -0,0 +1,75 @@
import json
from typing import Optional, List, Dict, Any, Tuple
from sqlalchemy.orm import Session
from openai import AsyncOpenAI

from providers.base_provider import LLMProvider
from services.api_key_service import ApiKeyService

# System prompt (Chinese): structured-debate instructions (answer directly,
# do not repeat the prompt or the history)
SYSTEM_PROMPT = "你正在参与一场结构化辩论。请按照用户消息中的规则进行辩论,直接给出你的论点,不要重复提示词或历史记录。"


class QwenProvider(LLMProvider):
    """
    Qwen API provider implementation using DashScope OpenAI-compatible API
    """

    def __init__(self, db: Session, api_key: Optional[str] = None):
        if not api_key:
            api_key = ApiKeyService.get_api_key(db, "qwen")

        if not api_key:
            raise ValueError("Qwen API key not found in database or provided")

        self.client = AsyncOpenAI(
            api_key=api_key,
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
        )

    def supports_tools(self) -> bool:
        return True

    async def generate_response(self, model: str, prompt: str, max_tokens: Optional[int] = None) -> str:
        try:
            response = await self.client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT},
                    {"role": "user", "content": prompt}
                ],
                max_tokens=max_tokens or 500
            )
            return response.choices[0].message.content.strip()
        except Exception as e:
            raise Exception(f"Error calling Qwen API: {str(e)}")

    async def generate_response_with_tools(
        self,
        model: str,
        prompt: str,
        tools: List[Dict[str, Any]],
        max_tokens: Optional[int] = None
    ) -> Tuple[str, List[Dict[str, Any]]]:
        try:
            response = await self.client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT},
                    {"role": "user", "content": prompt}
                ],
                tools=tools,
                tool_choice="auto",
                max_tokens=max_tokens or 500
            )
            message = response.choices[0].message
            text_content = message.content or ""
            tool_calls = []
            if message.tool_calls:
                for tc in message.tool_calls:
                    tool_calls.append({
                        "name": tc.function.name,
                        "arguments": json.loads(tc.function.arguments)
                    })
            return text_content.strip(), tool_calls
        except Exception as e:
            raise Exception(f"Error calling Qwen API with tools: {str(e)}")