#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
AI model handler.

Manages the creation and configuration of chat (generation) and embedding models.

Supported model types:
- doubao: Doubao model
- qwen: Tongyi Qianwen (Qwen) model
- qwen3_30b: Qwen3-30B model
- deepseek: DeepSeek model
- gemini: Gemini model
- lq_qwen3_8b: local Qwen3-8B model
- lq_qwen3_8b_lq_lora: local Qwen3-8B-lq-lora model
- lq_qwen3_4b: local Qwen3-4B model
- qwen_local_14b: local Qwen3-14B model
- lq_qwen3_8b_emd: local Qwen3-Embedding-8B embedding model
- siliconflow_embed: SiliconFlow Qwen3-Embedding-8B embedding model
- lq_bge_reranker_v2_m3: local BGE-reranker-v2-m3 reranker model
"""
# Silence transformers log output and disable Hugging Face Hub telemetry.
# These must be set before any import that pulls in transformers/huggingface_hub.
import os

os.environ["TRANSFORMERS_VERBOSITY"] = "error"
os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"

import requests
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

from foundation.infrastructure.config.config import config_handler
from foundation.observability.logger.loggering import review_logger as logger


class ModelConnectionError(Exception):
    """Raised when a model service cannot be reached."""
    pass


class ModelConfigError(Exception):
    """Raised when a model's configuration is missing or invalid."""
    pass


class ModelAPIError(Exception):
    """Raised when a model API call fails."""
    pass
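
# Callers can branch on the failure mode; an illustrative pattern (not part of
# this module's API, just a sketch of how the exceptions are meant to be used):
#
#     try:
#         llm = model_handler.get_models()
#     except ModelConnectionError:
#         ...  # no backend reachable: degrade gracefully or abort
#     except ModelConfigError:
#         ...  # fix the config file entry for the selected model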


class ModelHandler:
    """
    AI model handler that manages the creation and configuration of multiple AI models.
    """

    # Connection/timeout settings (seconds)
    CONNECTION_TIMEOUT = 30
    REQUEST_TIMEOUT = 120
    MAX_RETRIES = 2

    def __init__(self):
        """
        Initialize the model handler.

        Loads the config handler used to read each model's configuration.
        """
        self.config = config_handler
        self._model_cache = {}  # cache of model instances, keyed by type

    def _check_connection(self, base_url: str, api_key: str = None, timeout: int = 5) -> bool:
        """
        Check whether a model service is reachable.

        Args:
            base_url: base URL of the model service
            api_key: API key (optional)
            timeout: timeout in seconds

        Returns:
            bool: True if the service is reachable
        """
        try:
            # Build the health-check URL (OpenAI-compatible /models endpoint)
            health_url = f"{base_url.rstrip('/')}/models"
            headers = {}
            if api_key and api_key != "dummy":
                headers["Authorization"] = f"Bearer {api_key}"
            response = requests.get(
                health_url,
                headers=headers,
                timeout=timeout,
            )
            # Treat any 2xx status as reachable
            return 200 <= response.status_code < 300
        except requests.exceptions.Timeout:
            logger.warning(f"Connection timed out: {base_url}")
            return False
        except requests.exceptions.ConnectionError as e:
            logger.warning(f"Connection error: {base_url}, error: {e}")
            return False
        except Exception as e:
            logger.warning(f"Connection check failed: {base_url}, error: {e}")
            return False
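
    # Example (hypothetical endpoint): for an OpenAI-compatible server at
    # http://localhost:8000/v1, this probes GET http://localhost:8000/v1/models
    # and sends "Authorization: Bearer <api_key>" only when a real key is set.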

    def _handle_model_error(self, model_name: str, error: Exception, fallback_model=None):
        """
        Handle model initialization errors in one place.

        Args:
            model_name: name of the model
            error: the exception that occurred
            fallback_model: fallback model instance (optional)

        Returns:
            The fallback model instance, or None if no fallback is available.
        """
        error_type = type(error).__name__
        error_msg = str(error)
        logger.error(f"Model initialization failed [{model_name}]: {error_type} - {error_msg}")
        # If a fallback model was provided, log and return it
        if fallback_model:
            logger.warning(f"Using fallback model: {fallback_model.__class__.__name__}")
            return fallback_model
        # Otherwise return None and let the caller decide
        return None

    def get_models(self):
        """
        Get the default AI chat model instance.

        Returns:
            ChatOpenAI: a configured chat model instance.

        Note:
            The model is selected by the MODEL_TYPE entry in the config file.
            Supported types: doubao, qwen, qwen3_30b, deepseek, gemini,
            lq_qwen3_8b, lq_qwen3_8b_lq_lora, lq_qwen3_4b, qwen_local_14b.
            Unknown types fall back to the Gemini model.
        """
        # get_model_by_name(None) reads MODEL_TYPE from the config file, so
        # both entry points share one selection, caching, and fallback path.
        return self.get_model_by_name(None)

    def get_model_by_name(self, model_type: str = None):
        """
        Get a specific AI model instance by name.

        Args:
            model_type: model type name; if None, the default model from the
                config file is used.
                Supported types: doubao, qwen, qwen3_30b, deepseek, gemini,
                lq_qwen3_8b, lq_qwen3_8b_lq_lora, lq_qwen3_4b, qwen_local_14b

        Returns:
            ChatOpenAI: a configured AI model instance.

        Note:
            This method supports switching models dynamically, independent of
            the default MODEL_TYPE in the config file.
            If model_type is None, the default model from the config file is used.
            If model_type is unknown, Gemini is used as the fallback model.
        """
        # Fall back to the default model type from the config file
        if model_type is None:
            model_type = self.config.get("model", "MODEL_TYPE")
        logger.info(f"Fetching AI model, type: {model_type}")
        # Check the cache first
        cache_key = f"chat_{model_type}"
        if cache_key in self._model_cache:
            logger.info(f"Using cached model: {model_type}")
            return self._model_cache[cache_key]
        factories = {
            "doubao": self._get_doubao_model,
            "gemini": self._get_gemini_model,
            "qwen": self._get_qwen_model,
            "qwen3_30b": self._get_qwen3_30b_model,
            "deepseek": self._get_deepseek_model,
            "lq_qwen3_8b": self._get_lq_qwen3_8b_model,
            "lq_qwen3_8b_lq_lora": self._get_lq_qwen3_8b_lora_model,
            "lq_qwen3_4b": self._get_lq_qwen3_4b_model,
            "qwen_local_14b": self._get_qwen_local_14b_model,
        }
        try:
            factory = factories.get(model_type)
            if factory is None:
                logger.warning(f"Unknown model type '{model_type}', using the default Gemini model")
                factory = self._get_gemini_model
            model = factory()
            if model:
                self._model_cache[cache_key] = model
                logger.info(f"AI model initialized: {model_type}")
                return model
            else:
                raise ModelAPIError(f"Model initialization returned None: {model_type}")
        except Exception as e:
            logger.error(f"Failed to get model [{model_type}]: {e}")
            # Try Gemini as the fallback
            if model_type != "gemini":
                logger.info("Trying the Gemini model as a fallback")
                try:
                    fallback_model = self._get_gemini_model()
                    if fallback_model:
                        self._model_cache[cache_key] = fallback_model
                        logger.warning("Switched to the Gemini fallback model")
                        return fallback_model
                except Exception as fallback_error:
                    logger.error(f"Fallback model also failed: {fallback_error}")
            # All models failed; raise
            raise ModelConnectionError(f"Unable to initialize any model service: {e}")
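
    # Cache-sharing note (illustrative): repeated calls with the same name
    # reuse one instance, e.g. get_model_by_name("qwen") returns the object
    # cached under "chat_qwen" on every call after the first.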

    def get_embedding_model(self):
        """
        Get an embedding model instance.

        Returns:
            OpenAIEmbeddings: a configured embedding model instance.

        Note:
            The model is selected by the EMBEDDING_MODEL_TYPE entry in the config file.
            Supported types: lq_qwen3_8b_emd, siliconflow_embed.
            Defaults to the local lq_qwen3_8b_emd model.
        """
        embedding_model_type = self.config.get("model", "EMBEDDING_MODEL_TYPE", "lq_qwen3_8b_emd")
        logger.info(f"Initializing embedding model, type: {embedding_model_type}")
        # Check the cache first
        cache_key = f"embed_{embedding_model_type}"
        if cache_key in self._model_cache:
            logger.info(f"Using cached embedding model: {embedding_model_type}")
            return self._model_cache[cache_key]
        try:
            if embedding_model_type == "siliconflow_embed":
                model = self._get_siliconflow_embedding_model()
            elif embedding_model_type == "lq_qwen3_8b_emd":
                model = self._get_lq_qwen3_8b_emd()
            else:
                # Default to the local model
                logger.warning(f"Unknown embedding model type '{embedding_model_type}', using the default local model")
                model = self._get_lq_qwen3_8b_emd()
            if model:
                self._model_cache[cache_key] = model
                logger.info(f"Embedding model initialized: {embedding_model_type}")
                return model
            else:
                raise ModelAPIError(f"Embedding model initialization returned None: {embedding_model_type}")
        except Exception as e:
            logger.error(f"Failed to get embedding model [{embedding_model_type}]: {e}")
            raise ModelConnectionError(f"Unable to initialize the embedding model service: {e}")

    def _get_doubao_model(self):
        """
        Get the Doubao model.

        Returns:
            ChatOpenAI: a configured Doubao model instance.
        """
        try:
            doubao_url = self.config.get("doubao", "DOUBAO_SERVER_URL")
            doubao_model_id = self.config.get("doubao", "DOUBAO_MODEL_ID")
            doubao_api_key = self.config.get("doubao", "DOUBAO_API_KEY")
            # Validate that the configuration is complete
            if not all([doubao_url, doubao_model_id, doubao_api_key]):
                missing = []
                if not doubao_url:
                    missing.append("DOUBAO_SERVER_URL")
                if not doubao_model_id:
                    missing.append("DOUBAO_MODEL_ID")
                if not doubao_api_key:
                    missing.append("DOUBAO_API_KEY")
                raise ModelConfigError(f"Doubao model configuration is incomplete; missing: {', '.join(missing)}")
            # Check connectivity
            if not self._check_connection(doubao_url, doubao_api_key):
                logger.warning(f"Doubao model service is unreachable: {doubao_url}")
                raise ModelConnectionError(f"Unable to connect to the Doubao model service: {doubao_url}")
            llm = ChatOpenAI(
                base_url=doubao_url,
                model=doubao_model_id,
                api_key=doubao_api_key,
                temperature=0.7,
                timeout=self.REQUEST_TIMEOUT,
                extra_body={
                    "enable_thinking": False,
                },
            )
            logger.info(f"Doubao model initialized: {doubao_model_id}")
            return llm
        except (ModelConfigError, ModelConnectionError):
            raise
        except Exception as e:
            error = ModelAPIError(f"Doubao model initialization failed: {e}")
            return self._handle_model_error("doubao", error)

    def _get_qwen_model(self):
        """
        Get the Tongyi Qianwen (Qwen) model.

        Returns:
            ChatOpenAI: a configured Qwen model instance.
        """
        try:
            qwen_url = self.config.get("qwen", "QWEN_SERVER_URL")
            qwen_model_id = self.config.get("qwen", "QWEN_MODEL_ID")
            qwen_api_key = self.config.get("qwen", "QWEN_API_KEY")
            # Validate that the configuration is complete
            if not all([qwen_url, qwen_model_id, qwen_api_key]):
                missing = []
                if not qwen_url:
                    missing.append("QWEN_SERVER_URL")
                if not qwen_model_id:
                    missing.append("QWEN_MODEL_ID")
                if not qwen_api_key:
                    missing.append("QWEN_API_KEY")
                raise ModelConfigError(f"Qwen model configuration is incomplete; missing: {', '.join(missing)}")
            # Check connectivity
            if not self._check_connection(qwen_url, qwen_api_key):
                logger.warning(f"Qwen model service is unreachable: {qwen_url}")
                raise ModelConnectionError(f"Unable to connect to the Qwen model service: {qwen_url}")
            llm = ChatOpenAI(
                base_url=qwen_url,
                model=qwen_model_id,
                api_key=qwen_api_key,
                temperature=0.7,
                timeout=self.REQUEST_TIMEOUT,
                extra_body={
                    "enable_thinking": False,
                },
            )
            logger.info(f"Qwen model initialized: {qwen_model_id}")
            return llm
        except (ModelConfigError, ModelConnectionError):
            raise
        except Exception as e:
            error = ModelAPIError(f"Qwen model initialization failed: {e}")
            return self._handle_model_error("qwen", error)

    def _get_qwen3_30b_model(self):
        """
        Get the Qwen3-30B model.

        Returns:
            ChatOpenAI: a configured Qwen3-30B model instance.
        """
        try:
            qwen3_30b_url = self.config.get("qwen3_30b", "QWEN3_30B_SERVER_URL")
            qwen3_30b_model_id = self.config.get("qwen3_30b", "QWEN3_30B_MODEL_ID")
            qwen3_30b_api_key = self.config.get("qwen3_30b", "QWEN3_30B_API_KEY")
            # Validate that the configuration is complete
            if not all([qwen3_30b_url, qwen3_30b_model_id, qwen3_30b_api_key]):
                missing = []
                if not qwen3_30b_url:
                    missing.append("QWEN3_30B_SERVER_URL")
                if not qwen3_30b_model_id:
                    missing.append("QWEN3_30B_MODEL_ID")
                if not qwen3_30b_api_key:
                    missing.append("QWEN3_30B_API_KEY")
                raise ModelConfigError(f"Qwen3-30B model configuration is incomplete; missing: {', '.join(missing)}")
            # Check connectivity
            if not self._check_connection(qwen3_30b_url, qwen3_30b_api_key):
                logger.warning(f"Qwen3-30B model service is unreachable: {qwen3_30b_url}")
                raise ModelConnectionError(f"Unable to connect to the Qwen3-30B model service: {qwen3_30b_url}")
            llm = ChatOpenAI(
                base_url=qwen3_30b_url,
                model=qwen3_30b_model_id,
                api_key=qwen3_30b_api_key,
                temperature=0.7,
                timeout=self.REQUEST_TIMEOUT,
                extra_body={
                    "enable_thinking": False,
                },
            )
            logger.info(f"Qwen3-30B model initialized: {qwen3_30b_model_id}")
            return llm
        except (ModelConfigError, ModelConnectionError):
            raise
        except Exception as e:
            error = ModelAPIError(f"Qwen3-30B model initialization failed: {e}")
            return self._handle_model_error("qwen3_30b", error)

    def _get_deepseek_model(self):
        """
        Get the DeepSeek model.

        Returns:
            ChatOpenAI: a configured DeepSeek model instance.
        """
        try:
            deepseek_url = self.config.get("deepseek", "DEEPSEEK_SERVER_URL")
            deepseek_model_id = self.config.get("deepseek", "DEEPSEEK_MODEL_ID")
            deepseek_api_key = self.config.get("deepseek", "DEEPSEEK_API_KEY")
            # Validate that the configuration is complete
            if not all([deepseek_url, deepseek_model_id, deepseek_api_key]):
                missing = []
                if not deepseek_url:
                    missing.append("DEEPSEEK_SERVER_URL")
                if not deepseek_model_id:
                    missing.append("DEEPSEEK_MODEL_ID")
                if not deepseek_api_key:
                    missing.append("DEEPSEEK_API_KEY")
                raise ModelConfigError(f"DeepSeek model configuration is incomplete; missing: {', '.join(missing)}")
            # Check connectivity
            if not self._check_connection(deepseek_url, deepseek_api_key):
                logger.warning(f"DeepSeek model service is unreachable: {deepseek_url}")
                raise ModelConnectionError(f"Unable to connect to the DeepSeek model service: {deepseek_url}")
            llm = ChatOpenAI(
                base_url=deepseek_url,
                model=deepseek_model_id,
                api_key=deepseek_api_key,
                temperature=0.7,
                timeout=self.REQUEST_TIMEOUT,
                extra_body={
                    "enable_thinking": False,
                },
            )
            logger.info(f"DeepSeek model initialized: {deepseek_model_id}")
            return llm
        except (ModelConfigError, ModelConnectionError):
            raise
        except Exception as e:
            error = ModelAPIError(f"DeepSeek model initialization failed: {e}")
            return self._handle_model_error("deepseek", error)

    def _get_gemini_model(self):
        """
        Get the Gemini model.

        Returns:
            ChatOpenAI: a configured Gemini model instance.
        """
        try:
            gemini_url = self.config.get("gemini", "GEMINI_SERVER_URL")
            gemini_model_id = self.config.get("gemini", "GEMINI_MODEL_ID")
            gemini_api_key = self.config.get("gemini", "GEMINI_API_KEY")
            # Validate that the configuration is complete
            if not all([gemini_url, gemini_model_id, gemini_api_key]):
                missing = []
                if not gemini_url:
                    missing.append("GEMINI_SERVER_URL")
                if not gemini_model_id:
                    missing.append("GEMINI_MODEL_ID")
                if not gemini_api_key:
                    missing.append("GEMINI_API_KEY")
                raise ModelConfigError(f"Gemini model configuration is incomplete; missing: {', '.join(missing)}")
            # Check connectivity
            if not self._check_connection(gemini_url, gemini_api_key):
                logger.warning(f"Gemini model service is unreachable: {gemini_url}")
                raise ModelConnectionError(f"Unable to connect to the Gemini model service: {gemini_url}")
            llm = ChatOpenAI(
                base_url=gemini_url,
                model=gemini_model_id,
                api_key=gemini_api_key,
                temperature=0.7,
                timeout=self.REQUEST_TIMEOUT,
            )
            logger.info(f"Gemini model initialized: {gemini_model_id}")
            return llm
        except (ModelConfigError, ModelConnectionError):
            raise
        except Exception as e:
            error = ModelAPIError(f"Gemini model initialization failed: {e}")
            return self._handle_model_error("gemini", error)

    def _get_lq_qwen3_8b_model(self):
        """
        Get the local Qwen3-8B-Instruct model.

        Returns:
            ChatOpenAI: a configured local Qwen3-8B model instance.
        """
        try:
            server_url = "http://192.168.91.253:9002/v1"
            model_id = "Qwen3-8B"
            # Check that the local service is reachable
            if not self._check_connection(server_url, "dummy", timeout=3):
                logger.warning(f"Local Qwen3-8B model service is unreachable: {server_url}")
                raise ModelConnectionError(f"Unable to connect to the local Qwen3-8B model service: {server_url}")
            llm = ChatOpenAI(
                base_url=server_url,
                model=model_id,
                api_key="dummy",  # local service does not require a real API key
                temperature=0.7,
                timeout=self.REQUEST_TIMEOUT,
            )
            logger.info(f"Local Qwen3-8B model initialized: {model_id}")
            return llm
        except ModelConnectionError:
            raise
        except Exception as e:
            error = ModelAPIError(f"Local Qwen3-8B model initialization failed: {e}")
            return self._handle_model_error("lq_qwen3_8b", error)

    def _get_lq_qwen3_8b_lora_model(self):
        """
        Get the local Qwen3-8B-lq-lora model.

        Returns:
            ChatOpenAI: a configured local Qwen3-8B-lq-lora model instance.
        """
        try:
            server_url = self.config.get("lq_qwen3_8B_lora", "LQ_QWEN3_8B_LQ_LORA_SERVER_URL")
            model_id = self.config.get("lq_qwen3_8B_lora", "LQ_QWEN3_8B_LQ_LORA_MODEL_ID")
            api_key = self.config.get("lq_qwen3_8B_lora", "LQ_QWEN3_8B_LQ_LORA_API_KEY", "dummy")
            # Validate that the configuration is complete
            if not all([server_url, model_id]):
                missing = []
                if not server_url:
                    missing.append("LQ_QWEN3_8B_LQ_LORA_SERVER_URL")
                if not model_id:
                    missing.append("LQ_QWEN3_8B_LQ_LORA_MODEL_ID")
                raise ModelConfigError(f"Local Qwen3-8B-lq-lora model configuration is incomplete; missing: {', '.join(missing)}")
            # Check that the local service is reachable
            if not self._check_connection(server_url, api_key, timeout=3):
                logger.warning(f"Local Qwen3-8B-lq-lora model service is unreachable: {server_url}")
                raise ModelConnectionError(f"Unable to connect to the local Qwen3-8B-lq-lora model service: {server_url}")
            llm = ChatOpenAI(
                base_url=server_url,
                model=model_id,
                api_key=api_key,
                temperature=0.7,
                timeout=self.REQUEST_TIMEOUT,
            )
            logger.info(f"Local Qwen3-8B-lq-lora model initialized: {model_id}")
            return llm
        except (ModelConfigError, ModelConnectionError):
            raise
        except Exception as e:
            error = ModelAPIError(f"Local Qwen3-8B-lq-lora model initialization failed: {e}")
            return self._handle_model_error("lq_qwen3_8b_lora", error)

    def _get_lq_qwen3_4b_model(self):
        """
        Get the local Qwen3-4B-Instruct model.

        Returns:
            ChatOpenAI: a configured local Qwen3-4B model instance.
        """
        try:
            server_url = "http://192.168.91.253:9001/v1"
            model_id = "Qwen3-4B"
            # Check that the local service is reachable
            if not self._check_connection(server_url, "dummy", timeout=3):
                logger.warning(f"Local Qwen3-4B model service is unreachable: {server_url}")
                raise ModelConnectionError(f"Unable to connect to the local Qwen3-4B model service: {server_url}")
            llm = ChatOpenAI(
                base_url=server_url,
                model=model_id,
                api_key="dummy",  # local service does not require a real API key
                temperature=0.7,
                timeout=self.REQUEST_TIMEOUT,
            )
            logger.info(f"Local Qwen3-4B model initialized: {model_id}")
            return llm
        except ModelConnectionError:
            raise
        except Exception as e:
            error = ModelAPIError(f"Local Qwen3-4B model initialization failed: {e}")
            return self._handle_model_error("lq_qwen3_4b", error)

    def _get_qwen_local_14b_model(self):
        """
        Get the local Qwen3-14B-Instruct model.

        Returns:
            ChatOpenAI: a configured local Qwen3-14B model instance.
        """
        try:
            server_url = "http://192.168.91.253:9003/v1"
            model_id = "Qwen3-14B"
            # Check that the local service is reachable
            if not self._check_connection(server_url, "dummy", timeout=3):
                logger.warning(f"Local Qwen3-14B model service is unreachable: {server_url}")
                raise ModelConnectionError(f"Unable to connect to the local Qwen3-14B model service: {server_url}")
            llm = ChatOpenAI(
                base_url=server_url,
                model=model_id,
                api_key="dummy",  # local service does not require a real API key
                temperature=0.7,
                timeout=self.REQUEST_TIMEOUT,
            )
            logger.info(f"Local Qwen3-14B model initialized: {model_id}")
            return llm
        except ModelConnectionError:
            raise
        except Exception as e:
            error = ModelAPIError(f"Local Qwen3-14B model initialization failed: {e}")
            return self._handle_model_error("qwen_local_14b", error)

    def _get_lq_qwen3_8b_emd(self):
        """
        Get the local Qwen3-Embedding-8B embedding model.

        Returns:
            OpenAIEmbeddings: a configured local Qwen3-Embedding-8B embedding model instance.
        """
        try:
            server_url = "http://192.168.91.253:9003/v1"
            model_id = "Qwen3-Embedding-8B"
            # Check that the local service is reachable
            if not self._check_connection(server_url, "dummy", timeout=3):
                logger.warning(f"Local Qwen3-Embedding-8B model service is unreachable: {server_url}")
                raise ModelConnectionError(f"Unable to connect to the local Qwen3-Embedding-8B model service: {server_url}")
            # Use OpenAIEmbeddings from langchain_openai
            embeddings = OpenAIEmbeddings(
                base_url=server_url,
                model=model_id,
                api_key="dummy",  # local model uses a dummy API key
                timeout=self.REQUEST_TIMEOUT,
            )
            logger.info(f"Local Qwen3-Embedding-8B model initialized: {model_id}")
            return embeddings
        except ModelConnectionError:
            raise
        except Exception as e:
            error = ModelAPIError(f"Local Qwen3-Embedding-8B model initialization failed: {e}")
            return self._handle_model_error("lq_qwen3_8b_emd", error)

    def _get_siliconflow_embedding_model(self):
        """
        Get the SiliconFlow Qwen3-Embedding-8B embedding model.

        Returns:
            OpenAIEmbeddings: a configured SiliconFlow Qwen3-Embedding-8B embedding model instance.
        """
        try:
            server_url = self.config.get("siliconflow_embed", "SLCF_EMBED_SERVER_URL")
            api_key = self.config.get("siliconflow_embed", "SLCF_EMBED_API_KEY")
            model_id = self.config.get("siliconflow_embed", "SLCF_EMBED_MODEL_ID", "Qwen/Qwen3-Embedding-8B")
            dimensions = self.config.get("siliconflow_embed", "SLCF_EMBED_DIMENSIONS", "4096")
            # Validate that the configuration is complete
            if not all([server_url, api_key, model_id]):
                missing = []
                if not server_url:
                    missing.append("SLCF_EMBED_SERVER_URL")
                if not api_key:
                    missing.append("SLCF_EMBED_API_KEY")
                if not model_id:
                    missing.append("SLCF_EMBED_MODEL_ID")
                raise ModelConfigError(f"SiliconFlow embedding model configuration is incomplete; missing: {', '.join(missing)}")
            # Check connectivity
            if not self._check_connection(server_url, api_key):
                logger.warning(f"SiliconFlow embedding model service is unreachable: {server_url}")
                raise ModelConnectionError(f"Unable to connect to the SiliconFlow embedding model service: {server_url}")
            # Use OpenAIEmbeddings from langchain_openai
            embeddings = OpenAIEmbeddings(
                base_url=server_url,
                model=model_id,
                api_key=api_key,
                timeout=self.REQUEST_TIMEOUT,
            )
            logger.info(f"SiliconFlow embedding model initialized: {model_id} (dimensions: {dimensions})")
            return embeddings
        except (ModelConfigError, ModelConnectionError):
            raise
        except Exception as e:
            error = ModelAPIError(f"SiliconFlow embedding model initialization failed: {e}")
            return self._handle_model_error("siliconflow_embed", error)


# Global instance
model_handler = ModelHandler()


def get_models():
    """
    Module-level convenience function for getting models.

    Returns:
        tuple: (llm, chat, embed) - the LLM model, chat model, and embedding
        model instances. Currently llm and chat are the same instance, and
        embed is None.

    Note:
        Uses the global model_handler instance directly.
    """
    try:
        llm = model_handler.get_models()
        # For now, return the same model as both llm and chat; embed is not provided
        return llm, llm, None
    except Exception as e:
        logger.error(f"Failed to get models: {e}")
        raise ModelConnectionError(f"Unable to get model services: {e}")
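

# A minimal usage sketch (assumes a reachable backend for the configured
# MODEL_TYPE / EMBEDDING_MODEL_TYPE; the prompt strings and the choice of
# "deepseek" below are illustrative only):
if __name__ == "__main__":
    try:
        llm, chat, embed = get_models()
        reply = llm.invoke("Reply with the single word: ready")
        print(reply.content)
        # Dynamic selection by name (hypothetical choice of backend)
        ds = model_handler.get_model_by_name("deepseek")
        print(type(ds).__name__)
        # Embedding model selected by EMBEDDING_MODEL_TYPE in the config file
        embedder = model_handler.get_embedding_model()
        print(len(embedder.embed_query("hello")))
    except ModelConnectionError as e:
        logger.error(f"Demo aborted, no model service available: {e}")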