- from langchain_openai import ChatOpenAI
- from langchain_core.messages import HumanMessage
- from foundation.base.config import config_handler
class ModelHandler:
    """Factory for chat-LLM clients selected via configuration.

    Reads the ``model.MODEL_TYPE`` setting and returns a ``ChatOpenAI``
    client configured for the matching provider. Falls back to the
    Doubao model when the setting is missing or unrecognized.
    """

    # Sampling temperature shared by every provider configuration.
    _TEMPERATURE = 0.7

    def __init__(self):
        # Project-provided configuration accessor (singleton).
        self.config = config_handler

    def get_models(self):
        """Return the chat model selected by ``model.MODEL_TYPE``.

        Defaults to the Doubao model for unknown or missing values.
        """
        model_type = self.config.get("model", "MODEL_TYPE")
        # Dispatch table keeps provider selection in one place.
        factories = {
            "doubao": self._get_doubao_model,
            "qwen": self._get_qwen_model,
            "deepseek": self._get_deepseek_model,
            "qwen_local_1.5b": self._get_qwen_local_1_5b_model,
            "qwen_local_14b": self._get_qwen_local_14b_model,
        }
        # Unrecognized types fall back to Doubao, matching the original
        # if/elif chain's else branch.
        return factories.get(model_type, self._get_doubao_model)()

    def _build_remote_model(self, section, prefix):
        """Build a ``ChatOpenAI`` client from config section *section*.

        Expects the keys ``{prefix}_SERVER_URL``, ``{prefix}_MODEL_ID``
        and ``{prefix}_API_KEY`` in that section.

        NOTE(review): earlier revisions printed a prefix of the API key
        for debugging; those prints were removed because they leak
        credentials into stdout/logs.
        """
        return ChatOpenAI(
            base_url=self.config.get(section, f"{prefix}_SERVER_URL"),
            model=self.config.get(section, f"{prefix}_MODEL_ID"),
            api_key=self.config.get(section, f"{prefix}_API_KEY"),
            temperature=self._TEMPERATURE,
            # Disable provider-side "thinking" mode for plain completions.
            extra_body={
                "enable_thinking": False,
            },
        )

    def _get_doubao_model(self):
        """Return the Doubao chat model."""
        return self._build_remote_model("doubao", "DOUBAO")

    def _get_qwen_model(self):
        """Return the Tongyi Qianwen (Qwen) chat model."""
        return self._build_remote_model("qwen", "QWEN")

    def _get_deepseek_model(self):
        """Return the DeepSeek chat model.

        The original file defined this method twice; the first
        (shadowed) copy wrongly read the ``qwen`` config section. Only
        the correct deepseek-backed version is kept.
        """
        return self._build_remote_model("deepseek", "DEEPSEEK")

    def _get_gemini_model(self):
        """Return the Gemini chat model via its OpenAI-compatible endpoint.

        Not reachable from ``get_models`` (no ``gemini`` dispatch entry);
        kept for direct callers.
        """
        return self._build_remote_model("gemini", "GEMINI")

    def _get_qwen_local_1_5b_model(self):
        """Return the locally hosted Qwen2.5-1.5B-Instruct model."""
        return ChatOpenAI(
            base_url="http://172.16.35.50:8000/v1",
            model="Qwen2.5-1.5B-Instruct",
            api_key="sk-dummy",  # local server does not validate the key
            temperature=self._TEMPERATURE,
        )

    def _get_qwen_local_14b_model(self):
        """Return the locally hosted Qwen3-14B model."""
        return ChatOpenAI(
            base_url="http://172.16.35.50:8003/v1",
            model="Qwen3-14B",
            api_key="sk-dummy",  # local server does not validate the key
            temperature=self._TEMPERATURE,
        )
# Module-level singleton shared by the convenience function below.
model_handler = ModelHandler()


def get_models():
    """Module-level convenience wrapper around ``ModelHandler``.

    Returns:
        tuple: ``(llm, chat, embed)``. For now the same model instance
        serves as both ``llm`` and ``chat``; no embedder is configured,
        so ``embed`` is ``None``.
    """
    model = model_handler.get_models()
    # The chat model currently doubles as the plain LLM.
    return model, model, None