"""Model selection utilities: build chat LLM clients from project configuration."""
  1. from langchain_openai import ChatOpenAI
  2. from langchain_core.messages import HumanMessage
  3. from foundation.base.config import config_handler
  4. class ModelHandler:
  5. def __init__(self):
  6. self.config = config_handler
  7. def get_models(self):
  8. """
  9. 获取模型,默认为豆包
  10. """
  11. model_type = self.config.get("model", "MODEL_TYPE")
  12. if model_type == "doubao":
  13. return self._get_doubao_model()
  14. elif model_type == "qwen":
  15. return self._get_qwen_model()
  16. elif model_type == "deepseek":
  17. return self._get_deepseek_model()
  18. elif model_type == "qwen_local_1.5b":
  19. return self._get_qwen_local_1_5b_model()
  20. elif model_type == "qwen_local_14b":
  21. return self._get_qwen_local_14b_model()
  22. else:
  23. # 默认返回豆包
  24. return self._get_doubao_model()
  25. def _get_doubao_model(self):
  26. """
  27. 获取豆包模型
  28. """
  29. doubao_url = self.config.get("doubao", "DOUBAO_SERVER_URL")
  30. doubao_model_id = self.config.get("doubao", "DOUBAO_MODEL_ID")
  31. doubao_api_key = self.config.get("doubao", "DOUBAO_API_KEY")
  32. llm = ChatOpenAI(
  33. base_url=doubao_url,
  34. model=doubao_model_id,
  35. api_key=doubao_api_key,
  36. temperature=0.7,
  37. extra_body={
  38. "enable_thinking": False,
  39. })
  40. return llm
  41. def _get_qwen_model(self):
  42. """
  43. 获取通义千问模型
  44. """
  45. qwen_url = self.config.get("qwen", "QWEN_SERVER_URL")
  46. qwen_model_id = self.config.get("qwen", "QWEN_MODEL_ID")
  47. qwen_api_key = self.config.get("qwen", "QWEN_API_KEY")
  48. print(f"Debug - qwen_url: {qwen_url}")
  49. print(f"Debug - qwen_model_id: {qwen_model_id}")
  50. print(f"Debug - qwen_api_key: {qwen_api_key[:10]}..." if qwen_api_key else "Debug - qwen_api_key: None")
  51. llm = ChatOpenAI(
  52. base_url=qwen_url,
  53. model=qwen_model_id,
  54. api_key=qwen_api_key,
  55. temperature=0.7,
  56. extra_body={
  57. "enable_thinking": False,
  58. })
  59. return llm
  60. def _get_deepseek_model(self):
  61. """
  62. 获取通义千问模型
  63. """
  64. qwen_url = self.config.get("qwen", "QWEN_SERVER_URL")
  65. qwen_model_id = self.config.get("qwen", "QWEN_MODEL_ID")
  66. qwen_api_key = self.config.get("qwen", "QWEN_API_KEY")
  67. print(f"Debug - qwen_url: {qwen_url}")
  68. print(f"Debug - qwen_model_id: {qwen_model_id}")
  69. print(f"Debug - qwen_api_key: {qwen_api_key[:10]}..." if qwen_api_key else "Debug - qwen_api_key: None")
  70. llm = ChatOpenAI(
  71. base_url=qwen_url,
  72. model=qwen_model_id,
  73. api_key=qwen_api_key,
  74. temperature=0.7,
  75. extra_body={
  76. "enable_thinking": False,
  77. })
  78. return llm
  79. def _get_deepseek_model(self):
  80. """
  81. 获取通义千问模型
  82. """
  83. deepseek_url = self.config.get("deepseek", "DEEPSEEK_SERVER_URL")
  84. deepseek_model_id = self.config.get("deepseek", "DEEPSEEK_MODEL_ID")
  85. deepseek_api_key = self.config.get("deepseek", "DEEPSEEK_API_KEY")
  86. print(f"Debug - deepseek_url: {deepseek_url}")
  87. print(f"Debug - deepseek_model_id: {deepseek_model_id}")
  88. print(f"Debug - deepseek_api_key: {deepseek_api_key[:10]}..." if deepseek_api_key else "Debug - deepseek_api_key: None")
  89. llm = ChatOpenAI(
  90. base_url=deepseek_url,
  91. model=deepseek_model_id,
  92. api_key=deepseek_api_key,
  93. temperature=0.7,
  94. extra_body={
  95. "enable_thinking": False,
  96. })
  97. return llm
  98. def _get_gemini_model(self):
  99. """
  100. 获取通义千问模型
  101. """
  102. gemini_url = self.config.get("gemini", "GEMINI_SERVER_URL")
  103. gemini_model_id = self.config.get("gemini", "GEMINI_MODEL_ID")
  104. gemini_api_key = self.config.get("gemini", "GEMINI_API_KEY")
  105. print(f"Debug - gemini_url: {gemini_url}")
  106. print(f"Debug - gemini_model_id: {gemini_model_id}")
  107. print(f"Debug - gemini_api_key: {gemini_api_key[:10]}..." if gemini_api_key else "Debug - gemini_api_key: None")
  108. llm = ChatOpenAI(
  109. base_url=gemini_url,
  110. model=gemini_model_id,
  111. api_key=gemini_api_key,
  112. temperature=0.7,
  113. extra_body={
  114. "enable_thinking": False,
  115. })
  116. return llm
  117. def _get_qwen_local_1_5b_model(self):
  118. """
  119. 获取本地Qwen2.5-1.5B-Instruct模型
  120. """
  121. llm = ChatOpenAI(
  122. base_url="http://172.16.35.50:8000/v1",
  123. model="Qwen2.5-1.5B-Instruct",
  124. api_key="sk-dummy", # 本地模型使用虚拟API key
  125. temperature=0.7,
  126. )
  127. return llm
  128. def _get_qwen_local_14b_model(self):
  129. """
  130. 获取本地Qwen3-14B模型
  131. """
  132. llm = ChatOpenAI(
  133. base_url="http://172.16.35.50:8003/v1",
  134. model="Qwen3-14B",
  135. api_key="sk-dummy", # 本地模型使用虚拟API key
  136. temperature=0.7,
  137. )
  138. return llm
  139. # 创建全局实例
  140. model_handler = ModelHandler()
  141. def get_models():
  142. """
  143. 获取模型的全局函数
  144. 返回: (llm, chat, embed)
  145. """
  146. llm = model_handler.get_models()
  147. # 暂时返回相同的模型作为chat和embed
  148. return llm, llm, None