@@ -1,172 +1,217 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+'''
+@Project : lq-agent-api
+@File : utils.py
+@IDE : VsCode
+@Author :
+@Date : 2025-12-04 10:13:12
+
+=================================
+
+📋 Method Overview
+
+🏗️ Core model management:
+├── ModelHandler()              # model handler class
+├── get_models()                # global function for getting models
+└── model_handler               # global model handler instance
+
+🔍 Model getter methods:
+├── _get_doubao_model()         # get the Doubao model
+├── _get_qwen_model()           # get the Qwen (Tongyi Qianwen) model
+├── _get_deepseek_model()       # get the DeepSeek model
+├── _get_gemini_model()         # get the Gemini model
+├── _get_lq_qwen3_8b_model()    # get the local Qwen3-8B model
+└── _get_qwen_local_14b_model() # get the local Qwen3-14B model
+'''
+
 from langchain_openai import ChatOpenAI
 from langchain_core.messages import HumanMessage
 
 from foundation.base.config import config_handler
+from foundation.logger.loggering import server_logger as logger
 
 class ModelHandler:
+    """
+    AI model handler class that manages the creation and configuration of multiple AI models.
+
+    Supported model types:
+    - doubao: Doubao model
+    - qwen: Qwen (Tongyi Qianwen) model
+    - deepseek: DeepSeek model
+    - gemini: Gemini model
+    - lq_qwen3_8b: local Qwen3-8B model
+    - qwen_local_14b: local Qwen3-14B model
+    """
-    def __init__(self):
-        self.config = config_handler
-
-    def get_models(self):
-        """
-        Get the model; defaults to Doubao.
-        """
-        model_type = self.config.get("model", "MODEL_TYPE")
-        if model_type == "doubao":
-            return self._get_doubao_model()
-        elif model_type == "qwen":
-            return self._get_qwen_model()
-        elif model_type == "deepseek":
-            return self._get_deepseek_model()
-        elif model_type == "qwen_local_1.5b":
-            return self._get_qwen_local_1_5b_model()
-        elif model_type == "qwen_local_14b":
-            return self._get_qwen_local_14b_model()
-        else:
-            # Default to Doubao
-            return self._get_doubao_model()
-
-
-    def _get_doubao_model(self):
-        """
-        Get the Doubao model.
-        """
-        doubao_url = self.config.get("doubao", "DOUBAO_SERVER_URL")
-        doubao_model_id = self.config.get("doubao", "DOUBAO_MODEL_ID")
-        doubao_api_key = self.config.get("doubao", "DOUBAO_API_KEY")
-
-
-        llm = ChatOpenAI(
-            base_url=doubao_url,
-            model=doubao_model_id,
-            api_key=doubao_api_key,
-            temperature=0.7,
-            extra_body={
-                "enable_thinking": False,
-            })
-
-        return llm
+    def __init__(self):
+        """
+        Initialize the model handler.
+
+        Loads the config handler, which is later used to read each model's configuration.
+        """
+        self.config = config_handler
+
+    def get_models(self):
+        """
+        Get an AI model instance.
+
+        Returns:
+            ChatOpenAI: the configured AI model instance
+
+        Note:
+            The model is selected by the MODEL_TYPE entry in the config file.
+            Supported model types: doubao, gemini, qwen, deepseek, lq_qwen3_8b, qwen_local_14b.
+            Unknown types fall back to the Gemini model.
+        """
+        model_type = self.config.get("model", "MODEL_TYPE")
+        logger.info(f"Initializing AI model, model type: {model_type}")
+
+        if model_type == "doubao":
+            model = self._get_doubao_model()
+        elif model_type == "gemini":
+            model = self._get_gemini_model()
+        elif model_type == "qwen":
+            model = self._get_qwen_model()
+        elif model_type == "deepseek":
+            model = self._get_deepseek_model()
+        elif model_type == "lq_qwen3_8b":
+            model = self._get_lq_qwen3_8b_model()
+        elif model_type == "qwen_local_14b":
+            model = self._get_qwen_local_14b_model()
+        else:
+            # Unknown type: fall back to the default Gemini model
+            logger.warning(f"Unknown model type '{model_type}', using the default Gemini model")
+            model = self._get_gemini_model()
+
+        logger.info(f"AI model initialized: {model_type}")
+        return model
+
+
+    def _get_doubao_model(self):
+        """
+        Get the Doubao model.
+
+        Returns:
+            ChatOpenAI: the configured Doubao model instance
+        """
+        doubao_url = self.config.get("doubao", "DOUBAO_SERVER_URL")
+        doubao_model_id = self.config.get("doubao", "DOUBAO_MODEL_ID")
+        doubao_api_key = self.config.get("doubao", "DOUBAO_API_KEY")
+
+
+        llm = ChatOpenAI(
+            base_url=doubao_url,
+            model=doubao_model_id,
+            api_key=doubao_api_key,
+            temperature=0.7,
+            extra_body={
+                "enable_thinking": False,
+            })
+
+        return llm
 
-    def _get_qwen_model(self):
-        """
-        Get the Qwen (Tongyi Qianwen) model.
-        """
-        qwen_url = self.config.get("qwen", "QWEN_SERVER_URL")
-        qwen_model_id = self.config.get("qwen", "QWEN_MODEL_ID")
-        qwen_api_key = self.config.get("qwen", "QWEN_API_KEY")
-
-        print(f"Debug - qwen_url: {qwen_url}")
-        print(f"Debug - qwen_model_id: {qwen_model_id}")
-        print(f"Debug - qwen_api_key: {qwen_api_key[:10]}..." if qwen_api_key else "Debug - qwen_api_key: None")
-
-        llm = ChatOpenAI(
-            base_url=qwen_url,
-            model=qwen_model_id,
-            api_key=qwen_api_key,
-            temperature=0.7,
-            extra_body={
-                "enable_thinking": False,
-            })
-
-        return llm
-
-    def _get_deepseek_model(self):
-        """
-        Get the Qwen (Tongyi Qianwen) model.
-        """
-        qwen_url = self.config.get("qwen", "QWEN_SERVER_URL")
-        qwen_model_id = self.config.get("qwen", "QWEN_MODEL_ID")
-        qwen_api_key = self.config.get("qwen", "QWEN_API_KEY")
-
-        print(f"Debug - qwen_url: {qwen_url}")
-        print(f"Debug - qwen_model_id: {qwen_model_id}")
-        print(f"Debug - qwen_api_key: {qwen_api_key[:10]}..." if qwen_api_key else "Debug - qwen_api_key: None")
-
-        llm = ChatOpenAI(
-            base_url=qwen_url,
-            model=qwen_model_id,
-            api_key=qwen_api_key,
-            temperature=0.7,
-            extra_body={
-                "enable_thinking": False,
-            })
-
-        return llm
+    def _get_qwen_model(self):
+        """
+        Get the Qwen (Tongyi Qianwen) model.
+
+        Returns:
+            ChatOpenAI: the configured Qwen model instance
+        """
+        qwen_url = self.config.get("qwen", "QWEN_SERVER_URL")
+        qwen_model_id = self.config.get("qwen", "QWEN_MODEL_ID")
+        qwen_api_key = self.config.get("qwen", "QWEN_API_KEY")
+
+        llm = ChatOpenAI(
+            base_url=qwen_url,
+            model=qwen_model_id,
+            api_key=qwen_api_key,
+            temperature=0.7,
+            extra_body={
+                "enable_thinking": False,
+            })
+
+        return llm
-    def _get_deepseek_model(self):
-        """
-        Get the Qwen (Tongyi Qianwen) model.
-        """
-        deepseek_url = self.config.get("deepseek", "DEEPSEEK_SERVER_URL")
-        deepseek_model_id = self.config.get("deepseek", "DEEPSEEK_MODEL_ID")
-        deepseek_api_key = self.config.get("deepseek", "DEEPSEEK_API_KEY")
-
-        print(f"Debug - deepseek_url: {deepseek_url}")
-        print(f"Debug - deepseek_model_id: {deepseek_model_id}")
-        print(f"Debug - deepseek_api_key: {deepseek_api_key[:10]}..." if deepseek_api_key else "Debug - deepseek_api_key: None")
-
-        llm = ChatOpenAI(
-            base_url=deepseek_url,
-            model=deepseek_model_id,
-            api_key=deepseek_api_key,
-            temperature=0.7,
-            extra_body={
-                "enable_thinking": False,
-            })
-
-        return llm
-
-    def _get_gemini_model(self):
-        """
-        Get the Qwen (Tongyi Qianwen) model.
-        """
-        gemini_url = self.config.get("gemini", "GEMINI_SERVER_URL")
-        gemini_model_id = self.config.get("gemini", "GEMINI_MODEL_ID")
-        gemini_api_key = self.config.get("gemini", "GEMINI_API_KEY")
-
-        print(f"Debug - gemini_url: {gemini_url}")
-        print(f"Debug - gemini_model_id: {gemini_model_id}")
-        print(f"Debug - gemini_api_key: {gemini_api_key[:10]}..." if gemini_api_key else "Debug - gemini_api_key: None")
-
-        llm = ChatOpenAI(
-            base_url=gemini_url,
-            model=gemini_model_id,
-            api_key=gemini_api_key,
-            temperature=0.7,
-            extra_body={
-                "enable_thinking": False,
-            })
-
-        return llm
-
-    def _get_qwen_local_1_5b_model(self):
-        """
-        Get the local Qwen2.5-1.5B-Instruct model.
-        """
-        llm = ChatOpenAI(
-            base_url="http://172.16.35.50:8000/v1",
-            model="Qwen2.5-1.5B-Instruct",
-            api_key="sk-dummy",  # local model uses a dummy API key
-            temperature=0.7,
-        )
-
-        return llm
-
-    def _get_qwen_local_14b_model(self):
-        """
-        Get the local Qwen3-14B model.
-        """
-        llm = ChatOpenAI(
-            base_url="http://172.16.35.50:8003/v1",
-            model="Qwen3-14B",
-            api_key="sk-dummy",  # local model uses a dummy API key
-            temperature=0.7,
-        )
-
-        return llm
+    def _get_deepseek_model(self):
+        """
+        Get the DeepSeek model.
+
+        Returns:
+            ChatOpenAI: the configured DeepSeek model instance
+        """
+        deepseek_url = self.config.get("deepseek", "DEEPSEEK_SERVER_URL")
+        deepseek_model_id = self.config.get("deepseek", "DEEPSEEK_MODEL_ID")
+        deepseek_api_key = self.config.get("deepseek", "DEEPSEEK_API_KEY")
+
+        llm = ChatOpenAI(
+            base_url=deepseek_url,
+            model=deepseek_model_id,
+            api_key=deepseek_api_key,
+            temperature=0.7,
+            extra_body={
+                "enable_thinking": False,
+            })
+
+        return llm
+
+    def _get_gemini_model(self):
+        """
+        Get the Gemini model.
+
+        Returns:
+            ChatOpenAI: the configured Gemini model instance
+        """
+        gemini_url = self.config.get("gemini", "GEMINI_SERVER_URL")
+        gemini_model_id = self.config.get("gemini", "GEMINI_MODEL_ID")
+        gemini_api_key = self.config.get("gemini", "GEMINI_API_KEY")
+
+        llm = ChatOpenAI(
+            base_url=gemini_url,
+            model=gemini_model_id,
+            api_key=gemini_api_key,
+            temperature=0.7,
+            # extra_body={
+            #     "enable_thinking": False,
+            # }
+        )
+
+        return llm
+
+    def _get_lq_qwen3_8b_model(self):
+        """
+        Get the local Qwen3-8B-Instruct model.
+
+        Returns:
+            ChatOpenAI: the configured local Qwen3-8B model instance
+        """
+        llm = ChatOpenAI(
+            base_url="http://192.168.91.253:9000/v1",
+            model="/mnt/Qwen3-8B",
+            api_key="dummy",  # local model uses a dummy API key
+            temperature=0.7,
+        )
+
+        return llm
+
+    def _get_qwen_local_14b_model(self):
+        """
+        Get the local Qwen3-14B model.
+
+        Returns:
+            ChatOpenAI: the configured local Qwen3-14B model instance
+        """
+        llm = ChatOpenAI(
+            base_url="http://172.16.35.50:8003/v1",
+            model="Qwen3-14B",
+            api_key="sk-dummy",  # local model uses a dummy API key
+            temperature=0.7,
+        )
+
+        return llm
 
 # Create the global instance
@@ -175,7 +220,13 @@ model_handler = ModelHandler()
 def get_models():
     """
     Global function for getting models.
-    Returns: (llm, chat, embed)
+
+    Returns:
+        tuple: (llm, chat, embed) - the LLM model, chat model, and embedding model instances.
+            Note: llm and chat currently share the same model instance, and embed is None for now.
+
+    Note:
+        This is a convenience function that gets the model directly from the global model_handler instance.
     """
     llm = model_handler.get_models()
     # For now, return the same model as chat and embed
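
Reviewer note: a minimal usage sketch of the new module-level helper, for reference only and not part of the diff. The import path below is hypothetical; per the docstring, llm and chat currently share one instance and embed is None.

    from langchain_core.messages import HumanMessage

    # Hypothetical import path for utils.py inside lq-agent-api
    from foundation.model.utils import get_models

    # Unpack the tuple returned by the global helper
    llm, chat, embed = get_models()

    # Invoke the configured chat model with a single human message
    reply = llm.invoke([HumanMessage(content="ping")])
    print(reply.content)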
|