Browse files

Remove redundant code and add test cases

lingmin_package@163.com 5 months ago
parent
commit
6f785ecc3a

+ 43 - 7
README.md

@@ -17,10 +17,46 @@
 
 
 
-http://localhost:8001/queryex/stream
-{
-  "config": {
-      "sessionId":"111"
-  },
-  "input": "你好"
-}
+
+### Test endpoints
+
+  - Generation model endpoints
+    - chat
+        http://localhost:8001/test/generate/chat
+        {
+          "config": {
+              "session_id":"111"
+          },
+          "input": "你好"
+        }
+    - stream
+      http://localhost:8001/test/generate/stream
+        {
+          "config": {
+              "session_id":"111"
+          },
+          "input": "你好,我叫小红,你叫什么?"
+        }
+
+
+
+  - Agent endpoints
+    - chat
+      http://localhost:8001/test/agent/chat
+        {
+          "config": {
+              "session_id":"111"
+          },
+          "input": "你好"
+        }
+
+    - stream
+      http://localhost:8001/test/agent/stream
+        {
+          "config": {
+              "session_id":"111"
+          },
+          "input": "你好"
+        }
+
+
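
A minimal smoke-test sketch for the endpoints above, written with Python's requests library (the URLs and payload shape are taken from this README; how the stream endpoints chunk their responses is an assumption):

    # test_endpoints.py -- hypothetical driver for the README test endpoints
    import requests

    BASE = "http://localhost:8001"
    payload = {"config": {"session_id": "111"}, "input": "你好"}

    # Non-streaming call: one complete response body.
    resp = requests.post(f"{BASE}/test/generate/chat", json=payload, timeout=60)
    print(resp.text)

    # Streaming call: print chunks as they arrive (typewriter effect).
    with requests.post(f"{BASE}/test/generate/stream", json=payload,
                       stream=True, timeout=60) as stream_resp:
        for chunk in stream_resp.iter_content(chunk_size=None, decode_unicode=True):
            print(chunk, end="", flush=True)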

+ 3 - 41
agent/base_agent.py

@@ -11,10 +11,9 @@
 from datetime import datetime
 from io import StringIO
 from contextlib import redirect_stdout
-from typing import Dict, List, Optional, AsyncGenerator, Any, OrderedDict
+from typing import Dict, List, Optional
 from logger.loggering import server_logger
 from utils.redis_utils import get_redis_result_cache_data_and_delete_key
-from enums.common_enums import UserRoleEnum
 
 class BaseAgent:
     """
@@ -69,60 +68,34 @@ class BaseAgent:
     def get_input_context(
             self,
             trace_id: str,
-            business_scene: str,
             task_prompt_info: dict,
             input_query: str,
             context: Optional[str] = None,
-            supplement_info: Optional[str] = None,
-            header_info: Optional[Dict] = None , 
-            config_param: Optional[dict] = None
+            supplement_info: Optional[str] = None
     ) -> tuple[str,str]:
         """构建场景优化的上下文提示"""
         context = context or "无相关数据"
-        supplement_info = supplement_info or "无补充信息"
-        token = header_info.get('token', '') if header_info else ''
-        tenantId = header_info.get('tenantId', '') if header_info else ''
-        user_role = config_param.get('user_role', UserRoleEnum.COMMON.code) if config_param else UserRoleEnum.COMMON.code
         task_prompt_info_str = task_prompt_info["task_prompt"]
-        call_tools_return_data_type = "text"
-        final_result_data_type = "text"
-        # 如果配置按配置要求,如果未配置默认
-        call_tools_return_data_type = call_tools_return_data_type if call_tools_return_data_type else "text"
-        final_result_data_type = final_result_data_type if final_result_data_type else "Markdown"
-
+        
         # 场景优化的上下文模板
         context_template = """
         助手会话 [ID: {trace_id}] 
         时间: {timestamp}
         任务: {task_prompt_info_str}
         
-
         用户提供上下文信息:
         {context}
-
-        用户补充信息:
-        {supplement_info}
-
         用户输入问题:
         {input}
         
-        用户角色: {user_role}
-        安全验证: {token}
-        场ID: {tenantId}
         """
 
         input_context = context_template.format(
             trace_id=trace_id,
-            business_scene=business_scene,
             task_prompt_info_str=task_prompt_info_str,
             context=context,
             input=input_query,
-            call_tools_return_data_type=call_tools_return_data_type,
-            final_result_data_type=final_result_data_type,
             supplement_info=supplement_info,
-            user_role=user_role,
-            token=token,
-            tenantId=tenantId,
             timestamp=datetime.now().strftime("%Y-%m-%d %H:%M:%S")
         )
         
@@ -130,27 +103,16 @@ class BaseAgent:
           # 场景优化的上下文模板
         summary_context_template = """
         助手会话 [ID: {trace_id}] 
-        用户意图场景: {business_scene}
         上下文信息:
         {context}
-        补充信息:
-        {supplement_info}
         用户问题:
         {input}
-        用户角色: {user_role}
-        安全验证: {token}
-        场ID: {tenantId}
         """
 
         input_summary_context = summary_context_template.format(
             trace_id=trace_id,
-            business_scene=business_scene,
             context=context,
             input=input_query,
-            supplement_info=supplement_info,
-            user_role=user_role,
-            token=token,
-            tenantId=tenantId
         )
         return input_context , input_summary_context
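
Note that the first format() call above still receives supplement_info even though the trimmed template no longer contains a {supplement_info} placeholder. This is harmless: str.format ignores surplus keyword arguments, as the standalone sketch below shows.

    # str.format silently drops unused keyword arguments.
    template = "会话 [ID: {trace_id}] 问题: {input}"
    print(template.format(trace_id="t-1", input="你好", supplement_info=None))
    # -> 会话 [ID: t-1] 问题: 你好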
 

+ 0 - 84
agent/cus_streamer.py

@@ -1,84 +0,0 @@
-# !/usr/bin/ python
-# -*- coding: utf-8 -*-
-'''
-@Project    : lq-agent-api
-@File       :cus_streamer.py
-@IDE        :PyCharm
-@Author     :
-@Date       :2025/7/14 12:04
-'''
-from langchain_core.messages import HumanMessage
-from typing import AsyncGenerator
-
-import asyncio
-
-
-class AdaptiveStreamer:
-    def __init__(self, min_chunk: int = 256, max_chunk: int = 4096, initial_chunk: int = 4):
-        self.min_chunk = min_chunk
-        self.max_chunk = max_chunk
-        self.chunk_size = initial_chunk
-        self.buffer = ""
-        self.last_latency = 0.0
-
-    async def astream(self, model, prompt: str, config, stream_mode="values") -> AsyncGenerator[bytes, None]:
-        """
-        自适应流式输出
-        """
-        async for langchain_chunk in model.astream({"messages": [HumanMessage(content=prompt)]}, config=config, stream_mode=stream_mode):
-            # 检查是否有内容属性
-            if not hasattr(langchain_chunk['messages'][-1], 'content') or not langchain_chunk['messages'][-1].content:
-                continue  # 跳过空内容块
-
-            if isinstance(langchain_chunk['messages'][-1], HumanMessage):
-                continue
-
-            try:
-                # 添加到缓冲区
-                chunk_bytes = langchain_chunk['messages'][-1].content
-                self.buffer += chunk_bytes
-                # 处理缓冲区
-                while len(self.buffer) >= self.chunk_size:
-                    # 提取块
-                    output_chunk = self.buffer[:self.chunk_size]
-                    self.buffer = self.buffer[self.chunk_size:]
-
-                    # 记录发送时间
-                    start_time = asyncio.get_event_loop().time()
-                    yield output_chunk
-                    send_duration = asyncio.get_event_loop().time() - start_time
-
-                    # 基于发送时间调整块大小
-                    self.adjust_chunk_size(send_duration)
-
-            except Exception as e:
-                # 处理编码或其他错误
-                error_msg = f"[错误] {str(e)}".encode('utf-8')
-                yield error_msg
-                continue  # 继续处理后续块
-
-        # 发送剩余内容
-        if self.buffer:
-            yield self.buffer
-            self.buffer = ""
-
-    def adjust_chunk_size(self, send_duration: float):
-        """
-        基于发送时间调整块大小
-        """
-        # 计算发送速率(字节/秒)
-        if send_duration > 0:
-            send_rate = self.chunk_size / send_duration
-        else:
-            send_rate = float('inf')
-
-        # 调整策略
-        if send_rate < 10000:  # 低速网络(<10KB/s)
-            new_size = max(self.min_chunk, int(self.chunk_size * 0.8))
-        elif send_rate > 100000:  # 高速网络(>100KB/s)
-            new_size = min(self.max_chunk, int(self.chunk_size * 1.2))
-        else:
-            new_size = self.chunk_size
-
-        # 应用平滑过渡
-        self.chunk_size = int(0.7 * self.chunk_size + 0.3 * new_size)

+ 0 - 99
agent/fixed_intent.py

@@ -1,99 +0,0 @@
-# !/usr/bin/ python
-# -*- coding: utf-8 -*-
-'''
-@Project    : lq-agent-api
-@File       :intent.py
-@IDE        :PyCharm
-@Author     : 
-@Date       :2025/7/14 12:04
-'''
-
-
-import os
-import sys
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-
-from logger.loggering import server_logger
-from utils.utils import get_models
-from langchain_core.prompts import SystemMessagePromptTemplate
-from langchain_core.prompts import HumanMessagePromptTemplate
-from langchain_core.prompts import ChatPromptTemplate
-from langchain_core.prompts import FewShotChatMessagePromptTemplate
-from utils.yaml_utils import fixed_question_intent_config
-
-
-
-class FixedIntentIdentifyClient:
-
-    def __init__(self):
-        """
-            创建意图识别类
-        """
-          # 获取部署的模型列表
-        llm, chat, embed = get_models()
-        self.llm_recognition = chat
-        # 加载 意图识别系统配置信息
-        self.intent_prompt = fixed_question_intent_config
-
-
-
-    def recognize_intent(self, input):
-        """
-        意图识别
-        输入:用户输入的问题
-        输出:识别出的意图,可选项:
-        """
-        # 准备few-shot样例
-        examples = self.intent_prompt["fixed_problem_answer"]
-        #server_logger.info(f"加载prompt配置.examples: {examples}")
-
-        # 定义样本模板
-        examples_prompt = ChatPromptTemplate.from_messages(
-            [
-                ("human", "{inn}"),
-                ("ai", "{out}"),
-            ]
-        )
-        few_shot_prompt = FewShotChatMessagePromptTemplate(example_prompt=examples_prompt,
-                                                           examples=examples)
-        final_prompt = ChatPromptTemplate.from_messages(
-            [
-                ('system', self.intent_prompt["system_prompt"]),
-                few_shot_prompt,
-                ('human', '{input}'),
-            ]
-        )
-
-        chain = final_prompt | self.llm_recognition
-        result = chain.invoke(input=input)
-        # 容错处理
-        if hasattr(result, 'content'):
-            # 如果 result 有 content 属性,使用它
-            return result.content
-        else:
-            # 否则,直接返回 result
-            return result
-
-
-
-
-
-fixed_intent_identify_client = FixedIntentIdentifyClient()
-
-
-if __name__ == '__main__':
-    #input = "你好"
-    #input = "我要查询信息"  #result=cattle_farm_query
-    input = "查询最近步数情况" #result=cattle_farm_query
-    input = "这套系统给我带来了什么好处"  #result=cattle_farm_warning_plan
-    input = "建设数字化后给我带来了多少效益" #result=cattle_farm_warning_task_execute
-    #input = "查询11基本信息"
-    #input = "查询16号的信息,返回json"
-    # input = "查询16号的信息,返回markdown"
-    # input = "编号为0014的数据,返回JSON格式数据"
-    # input = "查询0013的信息"
-    # input = "查询0013的数据"
-    # input = """
-    result = fixed_intent_identify_client.recognize_intent(input)
-    server_logger.info(f"input={input} ,result={result}")
-    

+ 0 - 0
generate/__init__.py → agent/generate/__init__.py


+ 114 - 0
agent/generate/model_generate.py

@@ -0,0 +1,114 @@
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :model_generate.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/14 14:22
+'''
+
+from typing import Dict, Optional
+from langchain_core.prompts import HumanMessagePromptTemplate
+from langchain_core.prompts import ChatPromptTemplate
+from utils.utils import get_models
+from utils.yaml_utils import system_prompt_config
+
+
+class TestGenerateModelClient:
+    """
+        主要是生成式模型
+    """
+
+    def __init__(self):
+        # 获取部署的模型列表
+        llm, chat, embed = get_models()
+        self.llm = llm
+        self.chat = chat
+        # 固定系统提示词
+        self.system_prompt = system_prompt_config["system_prompt"]
+
+
+    def get_prompt_template(self):
+        """
+            构造普通Prompt提示词模板
+        """
+        human_template = """
+            {system_message}
+            用户的问题为:
+                {question}  
+            答案为:
+        """
+        human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
+        chat_prompt_template = ChatPromptTemplate.from_messages([human_message_prompt])
+        return chat_prompt_template
+    
+    
+    def get_model_generate_invoke(self, trace_id, task_prompt_info: dict, input_query, context=None):
+        """
+            模型生成链
+        """
+        # Step 1: 定义系统提示词模板 system_prompt
+
+        # Step 2: 构建完整的 prompt 模板
+        prompt_template = ChatPromptTemplate.from_messages([
+            ("system", task_prompt_info["task_prompt"]),
+            ("human", "{input}")
+        ])
+        # Step 3: 初始化模型
+        # Step 4: 使用模板格式化输入
+        messages = prompt_template.invoke({"input": input_query})
+        # Step 5: 流式调用模型
+        response = self.llm.invoke(messages)
+        return response.content
+
+    def get_model_generate_stream(self, trace_id, task_prompt_info: dict, input_query, context=None):
+        """
+            模型生成链
+        """
+        # Step 1: 定义系统提示词模板 system_prompt
+
+        # Step 2: 构建完整的 prompt 模板
+        prompt_template = ChatPromptTemplate.from_messages([
+            ("system", task_prompt_info["task_prompt"]),
+            ("human", "{input}")
+        ])
+        # Step 3: 初始化模型
+        # Step 4: 使用模板格式化输入
+        messages = prompt_template.invoke({"input": input_query})
+        # Step 5: 流式调用模型
+        response = self.llm.stream(messages)
+        # Step 6: 逐 token 输出(打字机效果)
+        for chunk in response:
+            yield chunk.content
+
+
+
+    def get_input_context(
+            self,
+            trace_id: str,
+            task_prompt_info: dict,
+            input_query: str,
+            context: Optional[str] = None
+    ) -> str:
+        #server_logger.info(f"task_prompt_info: {task_prompt_info}")
+        """构建问题和上下文"""
+        context = context or "无"
+        task_prompt_info_str = task_prompt_info["task_prompt"]
+
+        # 针对场景优化的上下文提示
+        base_context_prompt = """
+            日志链路跟踪ID:{trace_id}
+            任务信息:{task_prompt_info_str}
+            相关上下文数据:{context}
+            户问题:{input}
+        """
+        return base_context_prompt.format(
+            trace_id=trace_id,
+            task_prompt_info_str=task_prompt_info_str,
+            context=context,
+            input=input_query
+        )
+
+#
+test_generate_model_client = TestGenerateModelClient()
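
A hedged usage sketch for the new client (it assumes the model backend configured in config.ini is reachable, since get_models() runs when the module is imported):

    # Hypothetical driver for the streaming generation chain.
    from agent.generate.model_generate import test_generate_model_client

    task_prompt_info = {"task_prompt": "你是一个乐于助人的助手。"}  # made-up prompt
    # get_model_generate_stream is a generator yielding token chunks.
    for token in test_generate_model_client.get_model_generate_stream(
            trace_id="trace-1",
            task_prompt_info=task_prompt_info,
            input_query="你好"):
        print(token, end="", flush=True)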

+ 0 - 9
agent/generator_title.py

@@ -1,9 +0,0 @@
-# !/usr/bin/python
-# -*- coding: utf-8 -*-
-'''
-@Project    : lq-agent-api
-@File       :generator_title.py
-@IDE        :PyCharm
-@Author     :
-@Date       :2025/7/22 09:33
-'''

+ 0 - 125
agent/intent.py

@@ -1,125 +0,0 @@
-# !/usr/bin/ python
-# -*- coding: utf-8 -*-
-'''
-@Project    : lq-agent-api
-@File       :intent.py
-@IDE        :PyCharm
-@Author     : 
-@Date       :2025/7/14 12:04
-'''
-
-
-import os
-import sys
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-
-from logger.loggering import server_logger
-from utils.utils import get_models
-from langchain_core.prompts import SystemMessagePromptTemplate
-from langchain_core.prompts import HumanMessagePromptTemplate
-from langchain_core.prompts import ChatPromptTemplate
-from langchain_core.prompts import FewShotChatMessagePromptTemplate
-from utils import yaml_utils
-from agent.session.session_manager import SessionContextMemoryManager
-from base.config import config_handler
-
-
-class IntentIdentifyClient:
-
-    def __init__(self):
-        """
-            创建意图识别类
-        """
-          # 获取部署的模型列表
-        llm, chat, embed = get_models()
-        self.llm_recognition = chat
-        # 加载 意图识别系统配置信息
-        self.intent_prompt = yaml_utils.get_intent_prompt()
-
-    async def recognize_intent(self , trace_id: str , config: dict , input: str):
-        """
-        意图识别
-        输入:用户输入的问题
-        输出:识别出的意图,可选项:
-        - cattle_farm_common
-        - cattle_farm_query
-        - cattle_farm_warning_plan
-        """
-        session_id = config.sessionId
-        use_history_recognize_intent = config_handler.get("lru", "USE_RECOGNIZE_INTENT_HISTORY_MESSAGES" , "False")
-        server_logger.info(f"[使用用户最新历史记录作为意图识别]use_history_recognize_intent: {use_history_recognize_intent}")
-        history = "无"
-        if use_history_recognize_intent == "True":
-            # 上下文管理器
-            session_context_memory_manager = SessionContextMemoryManager(trace_id , session_id)
-            # 获取内存最新的多少条历史记录(将消息列表序列化为字符串)
-            history = await session_context_memory_manager.get_memory_last_history_str()
-        # 根据历史记录和用户问题进行识别意图
-        return self.recognize_intent_history(input=input , history=history)
-
-
-    def recognize_intent_history(self , input: str , history="无"):
-        """
-        意图识别
-        输入:用户输入的问题
-        输出:识别出的意图,可选项:
-        - cattle_farm_common
-        - cattle_farm_query
-        - cattle_farm_warning_plan
-        """
-        # 准备few-shot样例
-        examples = self.intent_prompt["intent_examples"]
-        #server_logger.info(f"加载prompt配置.examples: {examples}")
-        system_prompt = self.intent_prompt["system_prompt"]
-        system_prompt = system_prompt.format(history=history)
-        server_logger.info(f"增加用户历史记录,用于意图识别,prompt配置.system_prompt: {system_prompt}")
-
-        # 定义样本模板
-        examples_prompt = ChatPromptTemplate.from_messages(
-            [
-                ("human", "{inn}"),
-                ("ai", "{out}"),
-            ]
-        )
-        few_shot_prompt = FewShotChatMessagePromptTemplate(example_prompt=examples_prompt,
-                                                           examples=examples)
-        final_prompt = ChatPromptTemplate.from_messages(
-            [
-                ('system', system_prompt),
-                few_shot_prompt,
-                ('human', '{input}'),
-            ]
-        )
-
-        chain = final_prompt | self.llm_recognition
-        server_logger.info(f"意图识别输入input: {input}")
-        result = chain.invoke(input={"input": input})
-        # 容错处理
-        if hasattr(result, 'content'):
-            # 如果 result 有 content 属性,使用它
-            return result.content
-        else:
-            # 否则,直接返回 result
-            return result
-
-
-
-
-
-intent_identify_client = IntentIdentifyClient()
-
-
-if __name__ == '__main__':
-    #input = "你好"
-    #input = "我要查询信息"  #result=cattle_farm_query
-    input = "查询最近步数情况" #result=cattle_farm_query
-   
-    
-    input = "01、10"
-    input = "继续"
-    input = "当前没有"
-    #input = "查询12步数"
-    #input = "分析当前环境数据"
-    result = intent_identify_client.recognize_intent_history(history=history , input=input)
-    server_logger.info(f"result={result}")
-    
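
Both deleted intent clients assembled their prompt the same way: a few-shot block sandwiched between the system prompt and the user turn. A minimal standalone sketch of that pattern (the example data here is made up):

    from langchain_core.prompts import (
        ChatPromptTemplate,
        FewShotChatMessagePromptTemplate,
    )

    examples = [{"inn": "查询最近步数情况", "out": "cattle_farm_query"}]
    example_prompt = ChatPromptTemplate.from_messages(
        [("human", "{inn}"), ("ai", "{out}")]
    )
    few_shot = FewShotChatMessagePromptTemplate(
        example_prompt=example_prompt, examples=examples
    )
    final_prompt = ChatPromptTemplate.from_messages(
        [("system", "只返回业务指令。"), few_shot, ("human", "{input}")]
    )
    print(final_prompt.format(input="查询12步数"))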

+ 0 - 10
agent/session/__init__.py

@@ -1,10 +0,0 @@
-# !/usr/bin/ python
-# -*- coding: utf-8 -*-
-'''
-@Project    : lq-agent-api
-@File       :__init__.py.py
-@IDE        :Cursor
-@Author     : 
-@Date       :2025/7/30
-'''
-

+ 0 - 344
agent/session/session_manager.py

@@ -1,344 +0,0 @@
-# !/usr/bin/python
-# -*- coding: utf-8 -*-
-'''
-@Project    : lq-agent-api
-@File       :session_manager.py
-@IDE        :PyCharm
-@Author     :
-@Date       :2025/7/24 03:03
-'''
-
-import asyncio
-import logging
-import re
-import time
-from typing import Any, Dict, List, Tuple
-# 假设的导入(根据实际框架调整)
-from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
-from base.config import config_handler
-from base.redis_config import load_config_from_env
-from base.redis_connection import RedisConnectionFactory
-from base.async_redis_lock import AsyncRedisLock
-
-from langchain.memory import ConversationBufferMemory
-from langchain_community.chat_message_histories import RedisChatMessageHistory
-from langchain_core.runnables.history import RunnableWithMessageHistory
-from langchain_core.messages import get_buffer_string
-from langchain_core.messages import messages_to_dict, messages_from_dict
-from langchain.prompts import PromptTemplate
-from utils.utils import get_models
-import warnings
-from langchain_core._api.deprecation import LangChainDeprecationWarning
-from logger.loggering import server_logger
-from utils.yaml_utils import system_prompt_config
-
-from langchain_core.messages import BaseMessage, HumanMessage, AIMessage, SystemMessage, ToolMessage, FunctionMessage
-
-
-
-class SessionManager:
-    """集中管理会话状态和锁定机制"""
-
-    def __init__(self , trace_id: str , lock_key_prefix: str , session_id: str, client_id: str = "default"):
-        self.trace_id = trace_id
-        self.session_id = session_id
-        self.client_id = client_id
-        self.session_lock = None
-        self.session_lock_key = lock_key_prefix + session_id
-        # 上下文管理器
-        self.session_context_memory_manager = SessionContextMemoryManager(trace_id , session_id)
-        
-        
-    async def is_session_locked(self) -> bool:
-        """检查会话是否被其他设备锁定"""
-        if await self.redis_client.exists(self.session_lock_key):
-            return True
-        return False
-
-    async def acquire_session_lock(self, timeout: float = 5) -> bool:
-        """尝试获取会话锁,带超时机制"""
-        config_is_lock = config_handler.get("chat", "CHAT_SESSION_LOCK" , "True")
-        server_logger.info(trace_id =self.trace_id, msg=f"创建新会话: {self.session_lock_key},config_is_lock:{config_is_lock} (锁定设备: {self.client_id})")
-        if config_is_lock == "False":
-            return True
-        
-        try:
-         # 通过工厂模式获取 redis 连接器
-            self.redis_client = await RedisConnectionFactory.get_connection()
-            self.session_lock = AsyncRedisLock(self.redis_client, self.session_lock_key)
-            server_logger.info(trace_id =self.trace_id, msg=f"创建新会话: {self.session_lock_key} (锁定设备: {self.client_id})")
-            flag = await self.session_lock.acquire(timeout)
-            server_logger.debug(trace_id =self.trace_id, msg=f"尝试获取锁:{self.session_lock_key}-{flag}")
-            return flag
-        except asyncio.TimeoutError:
-            server_logger.warning(trace_id =self.trace_id, msg=f"获取会话锁超时: {self.session_lock_key}")
-            return False
-        except Exception as e:
-            server_logger.error(trace_id =self.trace_id, msg=f"获取会话锁失败: {self.session_lock_key}, 错误: {e}")
-            return False
-
-    async def release_session_lock(self):
-        """释放会话锁"""
-        config_is_lock = config_handler.get("chat", "CHAT_SESSION_LOCK" , "True")
-        server_logger.info(trace_id =self.trace_id, msg=f"释放新会话: {self.session_lock_key},config_is_lock:{config_is_lock} (锁定设备: {self.client_id})")
-        if config_is_lock == "False":
-            return
-        try:
-            if self.session_lock:
-                await self.session_lock.release()
-        except Exception as e:
-            server_logger.error(trace_id =self.trace_id, msg=f"释放会话锁失败: {self.session_lock_key}, 错误: {e}")
-
-
-    async def get_memory_history(self):
-        """
-            获取会话历史
-        """
-        return await self.session_context_memory_manager.get_memory_history()
-    
-    async def save_update_memory_history(self , history_messages , input_message  , output_message):
-        """
-            保存并更新历史会话
-        """
-        # 同步执行保存更新会话记录操作
-        #await self.session_context_memory_manager.save_update_memory_history(history_messages , input_message  , output_message)
-
-         # 创建任务但不等待(不阻塞)
-        asyncio.create_task(self.session_context_memory_manager.save_update_memory_history(history_messages , input_message  , output_message))
-        server_logger.info(trace_id =self.trace_id, msg=f"{self.session_id}: 保存并更新历史会话任务已创建,主协程继续执行结束")
-
-
-
-
-
-class SessionContextMemoryManager:
-    """
-        会话内存上下文管理器
-    """
-
-    def __init__(self , trace_id: str, session_id: str):
-        self.trace_id = trace_id
-        self.session_id = session_id
-        self.redis_memory = None
-        # 最大历史记录长度,超过后进行摘要处理
-        self.max_length = int(config_handler.get("lru", "AGENT_MAX_HISTORY_TOKENS"))
-        # 意图识别 可以使用最大多少条历史记录
-        self.recognize_intent_max_history = int(config_handler.get("lru", "AGENT_RECOGNIZE_INTENT_MAX_HISTORY_MESSAGES"))
-        llm, chat, embed = get_models()
-        self.llm = llm
-         # 固定系统提示词
-        self.system_prompt = system_prompt_config["summary_system_prompt"]
-        # 初始化 redis 聊天历史
-        self.init_redis_chat_history_memory()
-    
-
-    def init_redis_chat_history_memory(self):
-        """
-            获取 Redis 中指定会话的聊天记录
-        """
-        # 使用 contextmanager 仅在该代码块内忽略警告
-        with warnings.catch_warnings():
-            warnings.filterwarnings("ignore", category=LangChainDeprecationWarning)
-            redis_config = load_config_from_env()
-            #server_logger.info(trace_id =self.trace_id, msg=f"redis_config={redis_config}")
-            # 使用 RedisChatMessageHistory 存储对话历史
-            chat_history = RedisChatMessageHistory(
-                session_id=self.session_id,  # 唯一标识会话
-                url=redis_config.url  # 或直接使用 redis_client
-            )
-            #使用 Redis 存储记忆
-            self.redis_memory = ConversationBufferMemory(
-                memory_key="chat_history",
-                return_messages=True,
-                chat_memory=chat_history  # 或其他兼容存储
-            )
-
-        server_logger.info(trace_id=self.trace_id, msg=f"redis 内存上下文历史初始完成={self.redis_memory}")
-        return self.redis_memory
-
-
-    async def get_memory_history(self):
-        """
-            获取内存历史(原始记录)
-        """
-        history_messages = self.load_memory_history()
-        server_logger.debug(trace_id=self.trace_id, msg=f"begin session_id:{self.session_id}, session.history.len: {len(history_messages)}, session.history: {history_messages}")
-        return history_messages
-
-
-    async def get_memory_last_history_str(self):
-        """
-            获取内存最新的多少条历史记录(将消息列表序列化为字符串)
-                # 示例消息列表
-                messages = [
-                    HumanMessage(content="你好!"),
-                    AIMessage(content="我是AI助手。")
-                ]
-                # 转换为字符串
-                formatted_str = get_buffer_string(
-                    messages,
-                    human_prefix="User",  # 人类消息的前缀(默认"Human")
-                    ai_prefix="Assistant",  # AI消息的前缀(默认"AI")
-                    separator="\n"  # 消息分隔符(默认"\n")
-                )
-                输出结果:text
-                    User: 你好!
-                    Assistant: 我是AI助手。
-
-        """
-        history_messages = self.load_memory_history()
-        if history_messages is None or len(history_messages) == 0:
-            return "无"
-        truncated_last_history_messages = history_messages
-        if len(history_messages) > self.recognize_intent_max_history:
-             # 截取最新的 10 条消息-使用负数索引从末尾切片
-             truncated_last_history_messages = history_messages[-self.recognize_intent_max_history:]
-        # 使用安全转换 - 处理markdown格式
-        #truncated_last_history_messages = self._simplify_messages_content(truncated_last_history_messages)
-         # 获取历史字符串
-        history_messages_str = get_buffer_string(truncated_last_history_messages)
-        # 先转义大括号
-        history_messages_str = history_messages_str.replace('{', '{{').replace('}', '}}')
-        server_logger.info(trace_id=self.trace_id, msg=f"recognize_intent_history session_id:{self.session_id}, session.history.len: {len(history_messages)}, truncated.last.history.len: {len(truncated_last_history_messages)}, history_messages_str: {history_messages_str}")
-        return history_messages_str
-
-
-    def load_memory_history(self):
-        """
-            加载历史会话
-        """
-        return self.redis_memory.load_memory_variables({})["chat_history"]
-        
-
-    def save_memory_history(self, input  , output):
-        """
-            保存历史会话
-        """
-        self.redis_memory.save_context({"input": input}, {"output": output})
-
-    
-    async def save_update_memory_history(self , history_messages , input_message  , output_message):
-        """
-            如果历史记录 超出 最大长度,则先清除历史记录,再保存历史会话的摘要
-            输入参数:
-                history_messages: 历史会话
-                input_message: 输入
-                output_message: 输出
-        """
-        cur_messages = [input_message] + [output_message]
-        tmp_messages = list(history_messages) + cur_messages
-        history_messages , is_summary = await self.compress_chat_history(tmp_messages)
-        server_logger.info(trace_id =self.trace_id, msg=f"保存更新历史记录处理:session_id={self.session_id},is_summary={is_summary}")
-        
-        if is_summary:
-            # 如果是摘要消息,则清除保存的摘要消息
-            if isinstance(history_messages[0], SystemMessage):
-                self.clear_save_summary_memory(history_messages[0].content)
-            server_logger.debug(trace_id=self.trace_id, msg=f"end session_id:{self.session_id}, session.history.len: {len(history_messages)}, session.history: {history_messages}")
-        else:
-            server_logger.debug(trace_id=self.trace_id, msg=f"end session_id:{self.session_id}, session.history.len: {len(tmp_messages)}, session.history: {tmp_messages}")
-            self.redis_memory.save_context({"input": input_message.content}, {"output": output_message.content})
-
-
-    
-    def clear_save_summary_memory(self , summary_text):
-        """
-            清除原始的记录,保存的摘要历史会话
-        """
-        try:
-            server_logger.info(trace_id =self.trace_id, msg=f"type(summary_text): {type(summary_text)}")
-            server_logger.info(trace_id =self.trace_id, msg=f"summary_text: {summary_text}")
-            if not isinstance(summary_text, str):
-                # 安全兜底:尝试提取 .content
-                try:
-                    summary_text = str(summary_text.content)
-                except AttributeError:
-                    summary_text = repr(summary_text)  # 最后手段
-            self.redis_memory.clear()
-            server_logger.info(trace_id =self.trace_id, msg=f"session_id={self.session_id}先清除历史会话记录完成")
-            self.redis_memory.save_context({"input": "整理后对话摘要"}, {"output": summary_text})
-            server_logger.info(trace_id =self.trace_id, msg=f"session_id={self.session_id}再保存摘要历史记录完成")
-        except Exception as e:
-            server_logger.error(trace_id =self.trace_id, msg=f"clear_save_summary_memory error: {e}")
-        
-
-    async def compress_chat_history(self , chat_history):
-        """
-            压缩聊天历史,如果超过 max_length 字符则生成摘要
-        """
-        his_len = len(get_buffer_string(chat_history))
-        server_logger.info(trace_id =self.trace_id, msg=f"his_len={his_len},max_length={self.max_length}")
-        if his_len < self.max_length:
-            #server_logger.info(trace_id =self.trace_id, msg="get_buffer_string(chat_history) < max_length")
-            return chat_history , False
-
-        summary_prompt = PromptTemplate(
-            input_variables=["history"],
-            template=self.system_prompt
-        )
-        # 创建可运行链:prompt + llm
-        chain = summary_prompt | self.llm  # 等价于 LLMChain 的功能
-        # 获取历史字符串
-        history_str = get_buffer_string(chat_history)
-        # 异步调用(新 API)
-        summary_response = await chain.ainvoke({"history": history_str})
-        server_logger.info(trace_id =self.trace_id, msg=f"session_id={self.session_id},summary_text")
-        # 返回一个“系统消息”表示摘要
-        system_message = [SystemMessage(content=f"对话摘要:{summary_response.content}")]
-        return system_message , True
-
-
-
-
-    def _simplify_messages_content(self , history_messages):
-        """
-            简化消息内容
-            处理 Markdown 内容
-        """
-        cleaned_messages = []
-        # 在处理过程中简化内容
-        for message in history_messages:
-            if not hasattr(message, 'content') or not isinstance(message.content, str):
-                # 如果消息没有 content 或 content 不是字符串,直接添加
-                cleaned_messages.append(message)
-                continue
-                
-            content = message.content
-            content = self._simplify_markdown_content(content)
-            # 创建新消息对象,保留原始消息的类型和元数据,只更新 content
-            if isinstance(message, HumanMessage):
-                new_msg = HumanMessage(content=content, additional_kwargs=message.additional_kwargs, response_metadata=message.response_metadata)
-            elif isinstance(message, AIMessage):
-                new_msg = AIMessage(content=content, additional_kwargs=message.additional_kwargs, response_metadata=message.response_metadata)
-            elif isinstance(message, SystemMessage):
-                new_msg = SystemMessage(content=content, additional_kwargs=message.additional_kwargs, response_metadata=message.response_metadata)
-            elif isinstance(message, ToolMessage):
-                # ToolMessage 的 content 可能不是用户生成的文本,通常不处理
-                new_msg = ToolMessage(content=content, tool_call_id=message.tool_call_id, name=message.name, additional_kwargs=message.additional_kwargs, response_metadata=message.response_metadata)
-            elif isinstance(message, FunctionMessage):
-                new_msg = FunctionMessage(content=content, name=message.name, additional_kwargs=message.additional_kwargs, response_metadata=message.response_metadata)
-            else:
-                # 对于未知类型,尝试通用方式(如果 BaseMessage 支持)
-                new_msg = message.__class__(content=content, **{k: v for k, v in message.__dict__.items() if k != 'content'})
-            
-            cleaned_messages.append(new_msg)
-        
-        return cleaned_messages
-    
-
-    def _simplify_markdown_content(self , content):
-        """简化 Markdown 内容"""
-        # 移除表格
-        content = re.sub(r'\|.*\|.*\n\|.*\|.*(\n\|.*\|.*)*', '[表格数据]', content)
-        # 移除标题
-        content = re.sub(r'#{1,6}\s*', '', content)
-        # 移除粗体斜体
-        content = re.sub(r'[*_]{1,2}(.*?)[*_]{1,2}', r'\1', content)
-        # 移除表情符号和特殊标记
-        content = re.sub(r'[✅❌📋📊]', '', content)
-        # 标准化换行
-        content = re.sub(r'\n{3,}', '\n\n', content)
-        return content.strip()
-
-
-
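
One detail from the deleted manager worth keeping in mind: get_memory_last_history_str escaped { and } before handing serialized history to a prompt template, so model output containing JSON cannot be mistaken for template placeholders. A minimal sketch of the hazard and the fix:

    from langchain_core.messages import AIMessage, HumanMessage, get_buffer_string

    history = get_buffer_string([
        HumanMessage(content="返回JSON"),
        AIMessage(content='{"a": 1}'),
    ])
    # Escape braces so later str.format / prompt formatting does not
    # treat {"a": 1} as a placeholder.
    safe = history.replace("{", "{{").replace("}", "}}")
    print(safe)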

+ 19 - 56
agent/agent_mcp.py → agent/test_agent.py

@@ -8,7 +8,6 @@
 @Date       :2025/7/21 10:12
 '''
 import json
-import trace
 
 from langgraph.prebuilt import create_react_agent
 from sqlalchemy.sql.functions import user
@@ -16,7 +15,6 @@ from logger.loggering import server_logger
 from utils.common import handler_err
 from utils.utils import get_models
 from utils.yaml_utils import system_prompt_config
-from views import mcp_server
 
 import threading
 import time
@@ -25,12 +23,10 @@ from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain_core.runnables import RunnableConfig
 from agent.base_agent import BaseAgent
-from agent.session.session_manager import SessionManager
-from function.local_function import get_knowledge_answer
-from schemas.cattle_farm import FarmConfig
+from schemas.test_schemas import FormConfig
 
 
-class XiwuzcAgentMcpClient(BaseAgent):
+class TestAgentClient(BaseAgent):
     """
     Xiwuzc 智能助手+MCP(带完整会话管理) - 针对场景优化
     添加会话锁定机制,确保同一时间只有一个客户端可以使用特定会话
@@ -63,7 +59,7 @@ class XiwuzcAgentMcpClient(BaseAgent):
 
         # 清理任务
         self.cleanup_task = None
-        server_logger.info("mulian client initialized")
+        server_logger.info(" client initialized")
 
     async def init_agent(self):
         """初始化agent_executor(只需一次)"""
@@ -87,45 +83,33 @@ class XiwuzcAgentMcpClient(BaseAgent):
             prompt=prompt
         )
         self.initialized = True
-        server_logger.info("mulian agent initialized")
+        server_logger.info(" agent initialized")
 
 
-    async def handle_query(self, trace_id: str, business_scene: str, task_prompt_info: dict, input_query, context=None,
-                           supplement_info=None, header_info=None , config_param: FarmConfig = None):
+    async def handle_query(self, trace_id: str, task_prompt_info: dict, input_query, context=None,
+                            config_param: FormConfig = None):
         try:
             # 确保agent已初始化
             if not self.initialized:
                 await self.init_agent()
             
-            session_id = config_param.sessionId
-            user_role = config_param.userRole
-            # 构建会话管理
-            session_manager = SessionManager(lock_key_prefix="Chat_", trace_id=trace_id , session_id=session_id)
-            # 尝试获取会话锁(5秒超时)
-            if not await session_manager.acquire_session_lock():
-                return "错误:此会话已在其他设备登录,请使用新会话或等待解锁"
+            session_id = config_param.session_id
+           
 
             try:
                 # 构建输入消息
                 input_message , input_summary_context = self.get_input_context(
                     trace_id=trace_id,
-                    business_scene=business_scene,
                     task_prompt_info=task_prompt_info,
                     input_query=input_query,
-                    context=context,
-                    supplement_info=supplement_info,
-                    header_info=header_info,
-                    config_param={
-                        "session_id": session_id,
-                        "user_role": user_role
-                    }
+                    context=context
                 )
                 # 用于模型对话使用
                 input_human_message = HumanMessage(content=input_message)
                 # 用于对话历史记录摘要 
                 input_human_summary_message = HumanMessage(content=input_summary_context)
                 # 获取历史消息
-                history_messages = await session_manager.get_memory_history()
+                history_messages = []
                 # 构造完整的消息列表
                 all_messages = list(history_messages) + [input_human_message]
 
@@ -154,31 +138,27 @@ class XiwuzcAgentMcpClient(BaseAgent):
                 if full_response:
                     full_text = "".join(full_response)
                     server_logger.info(trace_id=trace_id, msg=f"full_response: {full_text}")
-                    # 保存并更新历史会话记录history
-                    await session_manager.save_update_memory_history(history_messages , input_human_summary_message, AIMessage(content=full_text))
                     full_text = self.clean_json_output(full_text)
                     return full_text
             finally:
                 # 确保释放会话锁
-                await session_manager.release_session_lock()
+                pass
         except PermissionError as e:
             # 处理会话被其他设备锁定的情况
             return str(e)
         except Exception as e:
-            handler_err(server_logger, trace_id=trace_id, err=e, err_name='query')
+            handler_err(server_logger, trace_id=trace_id, err=e, err_name='agent/chat')
             return f"系统错误: {str(e)}"
 
 
     async def handle_query_stream(
             self,
             trace_id: str,
-            business_scene: str,
             task_prompt_info: dict,
             input_query: str,
             context: Optional[str] = None,
-            supplement_info: Optional[str] = None,
             header_info: Optional[Dict] = None,
-            config_param: FarmConfig = None,
+            config_param: FormConfig = None,
     ) -> AsyncGenerator[str, None]:
         """流式处理查询(优化缓冲管理)"""
         try:
@@ -186,30 +166,15 @@ class XiwuzcAgentMcpClient(BaseAgent):
             if not self.initialized:
                 await self.init_agent()
             
-            session_id = config_param.sessionId
-            user_role = config_param.userRole
-
-            # 构建会话管理
-            session_manager = SessionManager(lock_key_prefix="ChatStream_", trace_id=trace_id , session_id=session_id)
-            # 尝试获取会话锁(5秒超时)
-            if not await session_manager.acquire_session_lock():
-                yield json.dumps({"error": "此会话已在其他设备登录"})
-                return
+            session_id = config_param.session_id
         
             try:
                 # 构建输入消息
                 input_message , input_summary_context = self.get_input_context(
                     trace_id=trace_id,
-                    business_scene=business_scene,
                     task_prompt_info=task_prompt_info,
                     input_query=input_query,
-                    context=context,
-                    supplement_info=supplement_info,
-                    header_info=header_info,
-                     config_param={
-                        "session_id": session_id,
-                        "user_role": user_role
-                    }
+                    context=context
                 )
                 server_logger.info(trace_id=trace_id, msg=f"input_context: {input_message}")
                 # 用于模型对话使用
@@ -217,7 +182,7 @@ class XiwuzcAgentMcpClient(BaseAgent):
                 # 用于对话历史记录摘要 
                 input_human_summary_message = HumanMessage(content=input_summary_context)
                  # 获取历史消息
-                history_messages = await session_manager.get_memory_history()
+                history_messages = []
                 # 构造完整的消息列表
                 all_messages = list(history_messages) + [input_human_message]
                 # 配置执行上下文
@@ -272,17 +237,15 @@ class XiwuzcAgentMcpClient(BaseAgent):
                 if full_response:
                     full_text = "".join(full_response)
                     server_logger.info(trace_id=trace_id, msg=f"full_response: {full_text}")
-                     # 保存并更新历史会话记录history
-                    await session_manager.save_update_memory_history(history_messages , input_human_summary_message, AIMessage(content=full_text))
             finally:
                 # 确保释放会话锁
-                await session_manager.release_session_lock()
+                pass
 
         except PermissionError as e:
             yield json.dumps({"error": str(e)})
         except Exception as e:
-            handler_err(server_logger, trace_id=trace_id, err=e, err_name='query_stream')
+            handler_err(server_logger, trace_id=trace_id, err=e, err_name='test_stream')
             yield json.dumps({"error": f"系统错误: {str(e)}"})
 
 
-client = XiwuzcAgentMcpClient()
+test_agent_client = TestAgentClient()
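
A sketch of driving the renamed client's streaming entry point (a FormConfig exposing a session_id field is assumed from schemas.test_schemas, matching the diff above):

    import asyncio
    from agent.test_agent import test_agent_client
    from schemas.test_schemas import FormConfig

    async def main():
        async for chunk in test_agent_client.handle_query_stream(
                trace_id="trace-1",
                task_prompt_info={"task_prompt": "你是一个乐于助人的助手。"},
                input_query="你好",
                config_param=FormConfig(session_id="111")):  # assumed field
            print(chunk, end="", flush=True)

    asyncio.run(main())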

+ 1 - 14
config/config.ini

@@ -8,6 +8,7 @@ DEEPSEEK_SERVER_URL=https://api.deepseek.com
 DEEPSEEK_MODEL_ID=deepseek-chat
 DEEPSEEK_API_KEY=
 
+
 [qwen]
 MODEL_SERVER_URL=https://api-inference.modelscope.cn/v1/
 CHAT_MODEL_ID=Qwen/Qwen3-30B-A3B
@@ -18,19 +19,12 @@ API_KEY=ms-61bf873e-7536-42a9-b830-b12dca656e1f
 DASHSCOPE_API_KEY=sk-9fca4fca37ce4f509ec9ead71ccdd542
 EMBED_MODEL_ID=text-embedding-v4
 
-[mcp]
-MCP_SERVER_CONFIG_PATH=mulian_servers_config.json
 
 
 [app]
 APP_CODE=lq-agent
 APP_SECRET=sx-73d32556-605e-11f0-9dd8-acde48001122
 
-[lru]
-AGENT_MAX_HISTORY_TOKENS=50000
-AGENT_MAX_HISTORY_MESSAGES=20
-AGENT_RECOGNIZE_INTENT_MAX_HISTORY_MESSAGES=6
-USE_RECOGNIZE_INTENT_HISTORY_MESSAGES=True
 
 [redis]
 REDIS_URL=redis://:123456@127.0.0.1:6379
@@ -46,12 +40,5 @@ LOG_FILE_MAX_MB=10
 LOG_BACKUP_COUNT=5
 CONSOLE_OUTPUT=True
 
-[chat]
-CHAT_SESSION_LOCK=False
 
 
-[knowledge_dify]
-DIFY_SERVER_URL=http://dify.xiwudev.cn/v1
-DIFY_API_KEY=dataset-O4i1jbk1DTsUZz5KqndQxo6C
-DIFY_DATASET_ID_LIST=e4d3965e-f66a-45db-8010-fe7acdd46f5d,745e2f10-e7a6-4d54-83ef-13ff09cc700c
-DIFY_DATASET_URL=/datasets/{dataset_id}/retrieve
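
With the [lru] and [chat] sections removed, any surviving config_handler.get calls for those keys now depend on their default argument (as in config_handler.get("chat", "CHAT_SESSION_LOCK", "True") seen earlier in this diff). A standalone configparser sketch of that fallback pattern (config_handler's internals are an assumption):

    import configparser

    parser = configparser.ConfigParser()
    parser.read("config/config.ini")
    # With fallback=, a missing section or option returns the default
    # instead of raising NoSectionError/NoOptionError.
    lock = parser.get("chat", "CHAT_SESSION_LOCK", fallback="True")
    print(lock)  # -> "True" once [chat] has been removed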

+ 0 - 9
config/prompt/fixed_intent_prompt.yaml

@@ -1,9 +0,0 @@
-
-
-
-# 系统提示词 - 固定问题
-system_prompt: |
-  学习给定样例,根据问题匹配对应业务场景指令,如果无法匹配,请返回“other”。
-  严格遵守:必须仅返回指令内容,不包含格式说明或额外解释。
-
-

+ 0 - 20
config/prompt/intent_prompt.yaml

@@ -1,20 +0,0 @@
-
-# 系统提示词
-system_prompt: |
-  基于提供的样例,结合用户最近的对话历史上下文进行意图识别,精准匹配对应的业务场景指令。
-  必须优先参考最近的上下文语义及用户意图演变,若问题与样例中的任一业务场景相符,则返回对应指令;若无法匹配任何已定义场景,则返回 cattle_farm_common。
-  严格遵守:仅输出指令字符串,不附加任何解释、说明或格式。
-  用户目前历史上下文信息:
-  {history}
-
-
-
-
-# 意图案例 准备few-shot样例 联系数字专家进行诊断;
-intent_examples: 
-  - inn: 信息查询.
-    out: cattle_farm_common
-
- 
-
-           

+ 0 - 128
core/__init__.py

@@ -8,132 +8,4 @@
 @Date       :2025/7/10 16:30
 '''
 
-from langgraph.prebuilt import create_react_agent
-from langgraph.checkpoint.memory import MemorySaver
-from langchain_core.prompts import ChatPromptTemplate
-from langchain_core.messages import HumanMessage
-
-from logger.loggering import server_logger
-from utils.utils import get_models
-from function.function_call import FunctionCall
-
-from io import StringIO
-import sys
-
-
-class XiwuzcAgent:
-    """
-        Xiwuzc 智能助手+function call
-    """
-
-    def __init__(self):
-        # 初始化
-        self.init_agent()
-
-    # 初始化 agent 对象
-    def init_agent(self):
-        # 获取部署的模型列表
-        llm, chat, embed = get_models()
-        self.llm = llm
-        self.chat = chat
-        # 初始化 工具列表
-        function_call = FunctionCall()
-        tools = [
-            function_call.query_recently_cattle_farm_ambient_info,
-            function_call.query_recently_cattle_temperature,
-            function_call.query_recently_cattle_eat_water,
-        ]
-        # 创建系统Prompt提示语
-        system_prompt = self.create_sys_prompt()
-
-        prompt = ChatPromptTemplate.from_messages([
-            ("system", system_prompt),
-            ("placeholder", "{messages}"),
-            ("placeholder", "{agent_scratchpad}")
-        ])
-
-        # 创建Agent
-        self.agent_executor = create_react_agent(
-            self.llm,
-            tools=tools,  #
-            prompt=prompt,
-            checkpointer=MemorySaver()
-        )
-
-    def handle_query(self, input_query, session_id):
-        # 流式处理事件
-        config = {"configurable": {"thread_id": session_id}}
-        try:
-            events = self.agent_executor.stream(
-                {"messages": [HumanMessage(content=input_query)]},
-                config=config,
-                stream_mode="values",
-            )
-            result_list = []
-            # 打印流式事件的消息
-            for event in events:
-                message = event["messages"][-1]  # 取最后一步信息
-                result_list.append(message.content)
-                # 转换为字符串并写入日志文件
-                log_content = self.get_pretty_message_str(message)
-                server_logger.info("\n" + log_content.strip())
-
-            final_result = event["messages"][-1].content if result_list else None
-            server_logger.info("=" * 50)
-            server_logger.info(f"最终结果: \n {final_result}")
-            server_logger.info("=" * 50)
-
-            return final_result
-        except Exception as e:
-            server_logger.error(f"处理查询时出错: {e}")
-            raise e
-
-    # agent 非流式输出
-    def handle_invoke_query(self, input_query, session_id):
-        config = {"configurable": {"thread_id": session_id}}
-        try:
-            result = self.agent_executor.invoke(
-                {"messages": [HumanMessage(content=input_query)]},
-                config=config,
-                stream_mode="values",
-            )
-            server_logger.info(f"result={result}")
-            for presult in result["messages"]:
-                server_logger.info(f'【agent】: {presult}')
-            server_logger.info("=" * 50)
-            final_result_conent = result["messages"][-1].content
-            server_logger.info(f"final_result_conent={final_result_conent}")
-            return final_result_conent
-        except Exception as e:
-            server_logger.error(f"处理查询时出错: {e}")
-            raise e
-
-    def get_pretty_message_str(self, message):
-        """
-            捕获 pretty_print() 输出为字符串
-        """
-        captured_output = StringIO()
-        sys.stdout = captured_output
-        server_logger.info(message.pretty_print())
-        sys.stdout = sys.__stdout__
-        return captured_output.getvalue()
-
-    @staticmethod
-    def create_sys_prompt():
-        system_prompt = """
-            你是一个农业智能专家,请根据提供的数据信息和规则信息分析是否存在异常并进行建议。
-            请严格按照以下步骤操作:
-            1. 检查可用工具
-            2. 必要时调用工具获取数据
-            3. 结合数据进行分析
-            4. 给出专业建议
-
-            注意:
-            - 必须通过工具获取最新数据后再分析
-            - 保持回答专业且简洁
-            """
-        return system_prompt
-
-
-
 

+ 39 - 0
core/common_enums.py

@@ -0,0 +1,39 @@
+
+from enum import Enum
+
+
+class ErrorCodeEnum(Enum):
+    """
+        错误码枚举定义
+    """
+    SUCCESS = ('100000', '成功')
+    ERROR = ('100500', '服务异常')
+
+    
+    SESSION_ID_EMPTY = ('100001', '会话ID为空')
+    BUSINSESS_SCENE_ERROR = ('100002', '业务场景错误')
+    INPUT_INFO_EMPTY = ('100003', '用户输入为空')
+    BUSINSESS_SCENE_EMPTY = ('100004', '业务场景为空')
+    BUSINSESS_SCENE_PROMPT_FILE_EMPTY = ('100005', '业务场景提示词文件为空')
+    BUSINSESS_SCENE_PROMPT_FILE_NOT_EXISTS = ('100006', '业务场景提示词文件不存在')
+    BUSINSESS_SCENE_PROMPT_FILE_READ_ERROR = ('100007', '业务场景提示词文件读取异常')
+    
+    def __init__(self, code : str, desc : str):
+        self.code = code
+        self.desc = desc
+
+    
+    def get_item_by_code(self , code : str):
+        """
+            根据code 找枚举项
+        """
+        for item in list(ErrorCodeEnum):
+            if item.code == code:
+                return item
+        return None
+
+    
+
+    def __str__(self) -> str:
+        return self.code + ":"  + self.desc
+    
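
get_item_by_code is defined as an instance method, so it has to be called on a member; a small usage sketch:

    from core.common_enums import ErrorCodeEnum

    print(ErrorCodeEnum.ERROR)  # -> 100500:服务异常 (via __str__)
    # Reverse lookup by code, invoked on any member:
    item = ErrorCodeEnum.SUCCESS.get_item_by_code("100500")
    print(item is ErrorCodeEnum.ERROR)  # -> True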

+ 0 - 155
enums/common_enums.py

@@ -1,155 +0,0 @@
-
-from enum import Enum
-
-
-class ErrorCodeEnum(Enum):
-    """
-        错误码枚举定义
-    """
-    SUCCESS = ('100000', '成功')
-    ERROR = ('100500', '服务异常')
-
-    
-    SESSION_ID_EMPTY = ('100001', '会话ID为空')
-    BUSINSESS_SCENE_ERROR = ('100002', '业务场景错误')
-    INPUT_INFO_EMPTY = ('100003', '用户输入为空')
-    BUSINSESS_SCENE_EMPTY = ('100004', '业务场景为空')
-    BUSINSESS_SCENE_PROMPT_FILE_EMPTY = ('100005', '业务场景提示词文件为空')
-    BUSINSESS_SCENE_PROMPT_FILE_NOT_EXISTS = ('100006', '业务场景提示词文件不存在')
-    BUSINSESS_SCENE_PROMPT_FILE_READ_ERROR = ('100007', '业务场景提示词文件读取异常')
-    
-    def __init__(self, code : str, desc : str):
-        self.code = code
-        self.desc = desc
-
-    
-    def get_item_by_code(self , code : str):
-        """
-            根据code 找枚举项
-        """
-        for item in list(ErrorCodeEnum):
-            if item.code == code:
-                return item
-        return None
-
-    
-
-    def __str__(self) -> str:
-        return self.code + ":"  + self.desc
-    
-
-
-
-class BusinessSceneEnum(Enum):
-    """
-        业务场景枚举定义
-    """
-    COMMON_MODEL_QUERY = ('common_model_query', '通用模型查询场景' , 'common_model_query.yaml')
-    CATTLE_FARM_COMMMON = ('cattle_farm_common', '' , 'cattle_farm_common.yaml')
-    CATTLE_FARM_QUERY = ('cattle_farm_query', '' , 'cattle_farm_query.yaml', "json")
-    CATTLE_FARM_QUERY_FIXED = ('cattle_farm_query_fixed', '-固定' , 'cattle_farm_query_fixed.yaml', "json")
-    CATTLE_FARM_IOT = ('cattle_farm_iot', '' , 'cattle_farm_iot.yaml')
-
-
-
-    
-    CATTLE_FARM_WARNING_PLAN = ('cattle_farm_warning_plan', '' , 'cattle_farm_warning_plan.yaml', "json")
-    CATTLE_FARM_WARNING_TASK_EXECUTE = ('cattle_farm_warning_task_execute', '-任务执行' , 'cattle_farm_warning_task_execute.yaml')
-
-    def __init__(self, code, desc , prompt_file , data_type="text"):
-        self.code = code
-        self.desc = desc
-        self.prompt_file = prompt_file
-        self.data_type = data_type
-
-    @staticmethod  
-    def get_item_by_code(code):
-        """
-            根据code 找枚举项
-        """
-        for item in list(BusinessSceneEnum):
-            if item.code == code:
-                return item
-        return None
-
-    @staticmethod  
-    def get_item_by_code_def_val(code , def_val):
-        """
-            根据code 找枚举项,如果没有找到,则返回默认值
-        """
-        for item in list(BusinessSceneEnum):
-            if item.code == code:
-                return item
-        return def_val
-
-# print(BusinessSceneEnum.CATTLE_FARM_QUERY_PLAN.code , BusinessSceneEnum.CATTLE_FARM_QUERY_PLAN.desc)     # 
-# print(BusinessSceneEnum.CATTLE_FARM_WARNING_PLAN.code , BusinessSceneEnum.CATTLE_FARM_WARNING_PLAN.desc)    # 
-
-
-
-class CacheDataKeyTypeEnum(Enum):
-    """
-        缓存数据data key类型枚举
-            基本信息 cattle_info
-            体温信息 cattle_temperature 
-            步数信息 cattle_walk
-    """
-    
-    CATTLE_INFO = ('cattle_info', '基本信息')
-    CATTLE_TEMPERATURE = ('cattle_temperature', '体温信息')
-    CATTLE_WALK = ('cattle_walk', '步数信息')
-    RETRIEVER_RESOURCES = ('retriever_resources', '私有知识库检索溯源')
-
-    
-    
-    def __init__(self, code : str, desc : str):
-        self.code = code
-        self.desc = desc
-
-    
-    def get_item_by_code(self , code : str):
-        """
-            根据code 找枚举项
-        """
-        for item in list(CacheDataKeyTypeEnum):
-            if item.code == code:
-                return item
-        return None
-
-    
-
-    def __str__(self) -> str:
-        return self.code + ":"  + self.desc
-
-    
-
-
-class UserRoleEnum(Enum):
-    """
-        用户角色定义枚举
-            普通用户 common
-            租户用户 tenant 
-    """
-    
-    COMMON = ('common', '普通用户')
-    TENANT = ('tenant', '租户用户')
-   
-    
-    def __init__(self, code : str, desc : str):
-        self.code = code
-        self.desc = desc
-
-    
-    def get_item_by_code(self , code : str):
-        """
-            根据code 找枚举项
-        """
-        for item in list(UserRoleEnum):
-            if item.code == code:
-                return item
-        return None
-
-    
-
-    def __str__(self) -> str:
-        return self.code + ":"  + self.desc

+ 0 - 34
function/knowledge_dify.py

@@ -1,34 +0,0 @@
-
-
-
-
-from base.config import config_handler
-from logger.loggering import server_logger
-from utils.common import handler_err
-from utils.request_tool import KnowledgeDifyAction
-
-
-class KnowledgeDify:
-    """
-        私有知识库查询处理(dify)
-    """
-
-
-    def __init__(self):
-        self.knowledge_dify_action = KnowledgeDifyAction()
-
-
-    def get_request_knowledge_dify(self , trace_id: str, question: str):
-        """
-            获取知识库内容
-            :param question: 问题
-            :return:
-        """
-        record_list = self.knowledge_dify_action.get_request_knowledge_retrieve_list(
-            params={"query": question} , trace_id=trace_id
-        )
-        return record_list
-
-
-        
-    

+ 0 - 91
function/load_mcp_server.py

@@ -1,91 +0,0 @@
-import json
-import logging
-import os
-from dotenv import load_dotenv
-from typing import Any, Dict
-from langchain_mcp_tools import convert_mcp_to_langchain_tools
-
-from base.config import config_handler
-from logger.loggering import server_logger
-
-from utils.common import handler_err
-
-
-class LoadMcpServer:
-
-    def __init__(self):
-        self.tools = []   # 默认为空列表
-        self.cleanup = None
-        self.mcp_server_config_path = config_handler.get("mcp","MCP_SERVER_CONFIG_PATH")
-        # 加载MCP配置文件信息
-        self.mcp_configs = self.load_mcp_configs(self.mcp_server_config_path)
-    
-
-    def load_mcp_configs(self, conf_file_path: str) -> Dict[str, Any]:
-        """
-            从 JSON 文件加载服务器配置
-            Args:
-                conf_file_path: JSON 配置文件路径
-            Returns:
-                包含服务器配置的字典
-        """
-          # 获取当前文件的目录
-        current_dir = os.path.dirname(__file__)
-        # 构建到 .env 的相对路径
-        conf_file_path = os.path.join(current_dir , '../',  'config' , conf_file_path)
-        server_logger.info(f"加载服务器配置: {conf_file_path}")
-        if not os.path.exists(conf_file_path):
-            server_logger.error(f"配置文件不存在: {conf_file_path}")
-            raise ValueError(f"'conf_file_path':{conf_file_path} 不存在!!")
-        try:
-            with open(conf_file_path, "r") as f:
-                return json.load(f)
-
-
-        except Exception as e:
-            server_logger.error(f"❌ 加载 MCP 配置文件失败: {e}")
-
-
-    async def get_mcp_tools(self):
-        """
-            加载MCP-Server对应的工具列表
-        """
-        try:
-            if not self.tools:
-                # ...原有工具加载逻辑...
-                tools, cleanup = await convert_mcp_to_langchain_tools(
-                    self.mcp_configs,
-                    server_logger
-                )
-                self.tools = tools
-                self.cleanup = cleanup
-            # 打印工具列表
-            self.server_logger_tools_info()
-            server_logger.info(f"tools init success")
-        except Exception as e:
-            handler_err(server_logger, e, err_name="工具加载失败")
-        return self.tools
-    
-
-    def server_logger_tools_info(self):
-        """
-            打印 MCP 模型工具列表
-        """
-        server_logger.info(f"MCP 模型工具总共[{len(self.tools)}]个")
-        server_logger.info(f"MCP 模型工具列表分别如下:")
-        for tool in self.tools:
-            server_logger.info("=" * 50)
-            server_logger.info(f"工具名称: {tool.name}")
-            server_logger.info(f"工具描述: {tool.description}")
-            server_logger.info(f"工具参数: {tool.args}")
-
-    async def close(self):
-        """
-            关闭MCP服务端连接资源
-        """
-        if self.cleanup is not None:
-            try:
-                await self.cleanup()
-                server_logger.info("✅ MCP 资源已成功清理")
-            except Exception as e:
-                server_logger.error(f"⚠️ 清理 MCP 资源失败: {e}")

+ 0 - 50
function/local_function.py

@@ -1,50 +0,0 @@
-
-
-from datetime import datetime
-from function.knowledge_dify import KnowledgeDify
-from logger.loggering import server_logger 
-from utils.redis_utils import set_redis_result_cache_data
-from utils.tool_utils import verify_user_role
-from enums.common_enums import CacheDataKeyTypeEnum
-import json
-# Function names must be semantic English, not pinyin
-def get_current_datetime() -> str:
-    """
-        Get the current date and time
-    """
-    now = datetime.now()
-    formatted_data = now.strftime("%Y-%m-%d %H:%M:%S")
-    return formatted_data
-
-
-
-
-knowledge_dify = KnowledgeDify()
-
-
-async def get_knowledge_answer(question: str , trace_id: str , user_role: str) -> str:
-    """
-        相关知识问题检索的工具
-        知识分类包括:政策法规、市场分析、基础、管理技术、案例库、高频问题、综合类
-        参数:
-            question: 用户问题中(用户输入问题)
-            trace_id: 日志链路跟踪ID
-            user_role: 用户角色
-    """
-    # 如果是普通用户直接返回无,只有租户用户才能检索查询知识库
-    if not verify_user_role(user_role):
-        return "无"
-    
-    record_list = knowledge_dify.get_request_knowledge_dify(trace_id=trace_id, question=question)
-    if record_list is None  or len(record_list) == 0:
-        return "无"
-    # 设置缓存数据,用于智能查询
-    retriever_resources_list = [record["segment"]["document"] for record in record_list]
-    server_logger.info(trace_id=trace_id, msg=f"知识库检索结果列表: retriever_resources_list={retriever_resources_list}")
-    await set_redis_result_cache_data(CacheDataKeyTypeEnum.RETRIEVER_RESOURCES.code, trace_id, json.dumps(retriever_resources_list))
-
-    content_list = [record["segment"]["content"] for record in record_list]
-    #server_logger.info(trace_id=trace_id, msg=f"知识库检索结果列表: content_list={content_list}")
-    return "\n".join(content_list)
-
-

+ 0 - 178
generate/model_generate.py

@@ -1,178 +0,0 @@
-# !/usr/bin/ python
-# -*- coding: utf-8 -*-
-'''
-@Project    : lq-agent-api
-@File       :model_generate.py
-@IDE        :PyCharm
-@Author     :
-@Date       :2025/7/14 14:22
-'''
-
-from typing import List, Dict, Any, Optional, AsyncGenerator
-from langchain_core.prompts import HumanMessagePromptTemplate
-from langchain_core.prompts import ChatPromptTemplate
-from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
-from langgraph.prebuilt import ToolNode
-
-from utils.utils import get_models
-from views import mcp_server
-from utils.yaml_utils import system_prompt_config
-from logger.loggering import server_logger
-
-
-class XiwuzcModelGenerateClient:
-    """
-        主要是生成式模型
-    """
-
-    def __init__(self):
-        # 获取部署的模型列表
-        llm, chat, embed = get_models()
-        self.llm = llm
-        self.chat = chat
-        # 构造工具列表
-        self.tool_node_list = [] # ToolNode(mcp_server.tools)
-        # 模型绑定工具列表
-        self.llm_with_tools = None #llm.bind_tools(mcp_server.tools)
-        # 工具调用系统提示词
-        self.system_prompt = "" #system_prompt_config["tools_system_prompt"]
-
-
-    def get_prompt_template(self):
-        """
-            构造普通Prompt提示词模板
-        """
-        human_template = """
-            {system_message}
-            用户的问题为:
-                {question}  
-            答案为:
-        """
-        human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
-        chat_prompt_template = ChatPromptTemplate.from_messages([human_message_prompt])
-        return chat_prompt_template
-
-    def get_model_generate_stream(self, task_prompt_info: dict, op_id, input_query, context=None, supplement_info=None):
-        """
-            模型生成链
-        """
-        # Step 1: 定义系统提示词模板 system_prompt
-
-        # Step 2: 构建完整的 prompt 模板
-        prompt_template = ChatPromptTemplate.from_messages([
-            ("system", task_prompt_info["task_prompt"]),
-            ("human", "{input}")
-        ])
-        # Step 3: 初始化模型
-        # Step 4: 使用模板格式化输入
-        messages = prompt_template.invoke({"input": input_query})
-        # Step 5: 流式调用模型
-        response = self.llm.stream(messages)
-        # Step 6: 逐 token 输出(打字机效果)
-        for chunk in response:
-            yield chunk.content
-
-
-    async def get_model_tools_call(self, operate_id: str, session_id, task_prompt_info: dict, input_query, context=None, supplement_info=None,
-                           header_info=None):
-        """
-            工具调用
-        """
-         # 构建输入消息
-        input_message = self.get_input_context(
-                trace_id= operate_id,
-                task_prompt_info=task_prompt_info,
-                input_query=input_query,
-                context=context,
-                supplement_info=supplement_info,
-                header_info=header_info
-            )
-        # Step 1: build the full prompt template
-        prompt_template = ChatPromptTemplate.from_messages([
-            ("system", self.system_prompt),
-            ("human", "{input}")
-        ])
-        # Step 2: invoke the LLM with tools bound
-        # response = self.llm_with_tools.invoke(
-        #     [HumanMessage(content="What's the weather like in Beijing?")]
-        # )
-        messages = prompt_template.format_messages(input=input_message)
-        response = await self.llm_with_tools.ainvoke(messages)
-        #server_logger.info(f"response={response},{dir(response)}")
-        # 2. Check whether there are tool calls
-        if "tool_calls" in response.additional_kwargs:
-            # Build an AIMessage in the expected shape
-            tool_call_message = AIMessage(
-                content="",
-                additional_kwargs=response.additional_kwargs
-            )
-            server_logger.info(operate_id=operate_id, msg=f"self.tool_node_list={self.tool_node_list}")
-            # Pass in the formatted messages
-            tool_response = await self.tool_node_list.ainvoke({"messages": [tool_call_message]})
-            #server_logger.info(operate_id=operate_id, msg=f"tool_response={tool_response}")
-            tools_message_result_list = []
-            for tools_message in tool_response["messages"]:
-                tools_message_result_list.append(tools_message.content)
-            result = "\n".join(tools_message_result_list)
-            server_logger.info(operate_id=operate_id, msg=f"tool_calls.tool_response.result={result}")
-            result = self.clean_json_output(result)
-            return result
-        else:
-            result = response.content
-            server_logger.info(operate_id=operate_id, msg=f"response.content={result}")
-            result = self.clean_json_output(result)
-            return result
-
-
-
-    def get_input_context(
-            self,
-            trace_id: str,
-            task_prompt_info: dict,
-            input_query: str,
-            context: Optional[str] = None,
-            supplement_info: Optional[str] = None,
-            header_info: Optional[Dict] = None
-    ) -> str:
-        #server_logger.info(f"task_prompt_info: {task_prompt_info}")
-        """构建问题和上下文"""
-        context = context or "无"
-        supplement_info = supplement_info or "无"
-        token = header_info.get('token', '') if header_info else ''
-        tenantId = header_info.get('tenantId', '') if header_info else ''
-        task_prompt_info_str = task_prompt_info["task_prompt"]
-
-        # Scene-optimized context prompt
-        base_context_prompt = """
-            Trace ID: {trace_id}
-            Task info: {task_prompt_info_str}
-            Related context: {context}
-            Supplementary info: {supplement_info}
-            User question: {input}
-            Security token: {token}
-            Tenant ID: {tenantId}
-        """
-        return base_context_prompt.format(
-            trace_id=trace_id,
-            task_prompt_info_str=task_prompt_info_str,
-            context=context,
-            input=input_query,
-            supplement_info=supplement_info,
-            token=token,
-            tenantId=tenantId
-        )
-
-
-    def clean_json_output(self , raw_output: str) -> str:
-        """
-            去除开头和结尾的 ```json 和 ```
-        """
-        cleaned = raw_output.strip()
-        if cleaned.startswith("```json"):
-            cleaned = cleaned[7:]  # 去掉开头的 ```json
-        if cleaned.endswith("```"):
-            cleaned = cleaned[:-3]  # 去掉结尾的 ```
-        return cleaned.strip()
-
-#
-xwzc_generate_client = XiwuzcModelGenerateClient()

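Note: clean_json_output above strips only a literal ```json prefix, so a ```JSON opener or a bare ``` fence is left behind. A slightly more general sketch (an assumption, not part of the original code), using only the standard library:

    import re

    def strip_code_fence(raw: str) -> str:
        """Remove one surrounding ```lang ... ``` fence, if present."""
        text = raw.strip()
        match = re.match(r"^```[\w-]*\s*\n(.*?)\n?```$", text, re.DOTALL)
        return match.group(1).strip() if match else text
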
+ 0 - 27
schemas/cattle_farm.py

@@ -1,27 +0,0 @@
-# !/usr/bin/ python
-# -*- coding: utf-8 -*-
-'''
-@Project    : lq-agent-api
-@File       :cattle_farm.py
-@IDE        :PyCharm
-@Author     :
-@Date       :2025/7/11 12:41
-'''
-from typing import Optional
-
-from pydantic import BaseModel, constr, Field
-from enums.common_enums import BusinessSceneEnum, ErrorCodeEnum, UserRoleEnum
-
-
-class FarmConfig(BaseModel):
-    sessionId: constr(max_length=128) = Field(description="session ID")
-    userRole: constr(max_length=15) = Field(default=UserRoleEnum.COMMON.code, description="user role")
-
-
-
-class CattleFarm(BaseModel):
-    config: FarmConfig
-    input: Optional[str] = Field(description="user input")
-    businessScene: str = Field(default=None, description="business scene")
-    context: Optional[str] = Field(default=None, description="reference context")
-    supplementInfo: Optional[str] = Field(default=None, description="supplementary info")

+ 22 - 0
schemas/test_schemas.py

@@ -0,0 +1,22 @@
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :test_schemas.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/11 12:41
+'''
+from typing import Optional
+from pydantic import BaseModel, constr, Field
+
+
+class FormConfig(BaseModel):
+    session_id: constr(max_length=128) = Field(description="session ID")
+
+
+
+class TestForm(BaseModel):
+    config: FormConfig
+    input: Optional[str] = Field(description="user input")
+    context: Optional[str] = Field(default=None, description="reference context")

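For reference, a quick validation check of the new schema (a sketch assuming Pydantic v1, which the constr(...) field syntax above implies):

    from schemas.test_schemas import TestForm

    form = TestForm.parse_obj({
        "config": {"session_id": "111"},
        "input": "你好",
    })
    assert form.config.session_id == "111"
    assert form.context is None  # optional field defaults to None
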
+ 3 - 3
server/app.py

@@ -12,19 +12,19 @@ from fastapi.middleware.cors import CORSMiddleware
 from fastapi import FastAPI
 
 from logger.loggering import server_logger
-from views.cattle_farm_views import cattle_router
+from views.test_views import test_router
 
 
 # Create the FastAPI application
 app = FastAPI(
     title=" Agent API",
     version="0.2",
-    description=" Agent+MCP API",
+    description=" Agent API",
     lifespan=lifespan
 )
 
 
-app.include_router(cattle_router)
+app.include_router(test_router)
 
 
 # Add the CORS middleware

+ 0 - 54
test/test_redis.py

@@ -1,54 +0,0 @@
-# !/usr/bin/ python
-# -*- coding: utf-8 -*-
-'''
-@Project    : lq-agent-api
-@File       :test_redis.py
-@IDE        :PyCharm
-@Author     :
-@Date       :2025/7/11 12:23
-'''
-
-import os
-import sys
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-
-
-import redis
-#from langchain.storage import RedisStore
-from langchain_community.storage import RedisStore
-import asyncio
-from redis import Redis as SyncRedisClient
-
-async def main():
-    # Create a synchronous Redis client (note: not aioredis)
-    redis_client = redis.Redis.from_url(
-        "redis://localhost:6379",
-        decode_responses=False  # LangChain needs bytes
-    )
-    # redis_client = SyncRedisClient.from_url(
-    #     "redis://localhost:6379",
-    #     decode_responses=False  # LangChain needs bytes
-    # )
-    
-    # Create the store
-    store = RedisStore(client=redis_client)
-    
-    # Store data (note: set, not aset)
-   
-   
-    # Store data: use mset (even for a single key)
-    store.mset([("test_key", b"test_value")])
-
-    # Fetch data: use mget
-    value = store.mget(["test_key"])
-    print(f"Retrieved: {value}")  # [b'test_value']
-
-    # To extract the first value
-    if value and value[0] is not None:
-        print(f"Value: {value[0].decode('utf-8')}")  # prints: Value: test_value
-
-
-    # Close the connection (optional)
-    redis_client.close()
-
-asyncio.run(main())

+ 0 - 48
test/test_redis2.py

@@ -1,48 +0,0 @@
-# !/usr/bin/ python
-# -*- coding: utf-8 -*-
-'''
-@Project    : lq-agent-api
-@File       :test_redis2.py
-@IDE        :PyCharm
-@Author     :
-@Date       :2025/7/11 12:23
-'''
-import os
-import sys
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-
-
-import redis
-#from langchain.storage import RedisStore
-from langchain_community.storage import RedisStore
-import asyncio
-from redis import Redis as SyncRedisClient
-
-async def main():
-    # Create a synchronous Redis client (note: not aioredis)
-    redis_client = redis.Redis.from_url(
-        "redis://localhost:6379",
-        decode_responses=False  # LangChain needs bytes
-    )
-    # redis_client = SyncRedisClient.from_url(
-    #     "redis://localhost:6379",
-    #     decode_responses=False  # LangChain needs bytes
-    # )
-    
-    # Create the store
-    store = RedisStore(client=redis_client)
-    # AttributeError: 'RedisStore' object has no attribute 'aset'. Did you mean: 'amset'?
-    # Store data asynchronously (aset instead of set)
-    await store.aset("test_key", b"test_value")
-    
-    # Fetch data asynchronously (aget instead of get)
-    value = await store.aget("test_key")
-    print(f"Retrieved value: {value}")  # prints: b'test_value'
-    
-    # Delete data asynchronously (adelete instead of delete)
-    await store.adelete("test_key")
-
-    # Close the connection (optional)
-    redis_client.close()
-
-asyncio.run(main())

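Note: the deleted test above failed because RedisStore has no aset/aget; the traceback quoted in it already points at the batch API (amset). A corrected sketch, assuming langchain_community's RedisStore and the async batch methods (amset/amget/amdelete) inherited from the base store:

    import asyncio
    import redis
    from langchain_community.storage import RedisStore

    async def main():
        # synchronous client; LangChain's RedisStore wants raw bytes
        redis_client = redis.Redis.from_url("redis://localhost:6379", decode_responses=False)
        store = RedisStore(client=redis_client)

        await store.amset([("test_key", b"test_value")])  # async batch set
        value = await store.amget(["test_key"])           # -> [b"test_value"]
        print(value)
        await store.amdelete(["test_key"])                # async batch delete
        redis_client.close()

    asyncio.run(main())
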
+ 0 - 88
test/test_redis3.py

@@ -1,88 +0,0 @@
-# !/usr/bin/ python
-# -*- coding: utf-8 -*-
-'''
-@Project    : lq-agent-api
-@File       :test_redis3.py
-@IDE        :PyCharm
-@Author     :
-@Date       :2025/7/11 12:23
-'''
-
-import os
-import sys
-
-from sqlalchemy.orm import session
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-
-import asyncio
-from base.redis_connection import RedisConnectionFactory
-#from langchain_core.checkpoints import BaseCheckpointSaver
-
-async def main():
-    # Get the RedisStore directly
-    redis_store = await RedisConnectionFactory.get_redis_store()
-    await redis_store.set("some_key", "some_value")
-    data = await redis_store.get("some_key") 
-    print(data)
-    # 2. Create the checkpointer
-    # checkpointer = AsyncCheckpointSaver(store=redis_store)
-    # print(checkpointer)
-
-    # Get the RedisStore directly
-    #self.redis_store = await RedisConnectionFactory.get_langchain_redis_store()
-    from langgraph.checkpoint.redis import RedisSaver
-    from redis import Redis
-    import redis
-
-    # Step 1: connect to Redis
-    redis_client = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
-    checkpointer = RedisSaver(redis_client=redis_client)
-    print(f"checkpointer={checkpointer}")
-
-    #from langchain.storage import RedisStore
-    #from langchain_community.storage import RedisStore
-    #redis_store = RedisStore.from_client(redis_client) 
-    #AttributeError: type object 'RedisStore' has no attribute 'from_client'
-    # error:   UnboundLocalError: cannot access local variable 'RedisStore' where it is not associated with a value
-    #print(f"redis_store={redis_store}")
-
-
-    #from langchain_community.checkpoint import RedisCheckpointer
-    #from langchain.agents import RedisCheckpointer
-    #from langchain.memory import RedisCheckpointer
-    #checkpointer2 = RedisCheckpointer(redis_store)
-    #print(f"checkpointer2={checkpointer2}")
-    
-
-    #from langchain.storage import RedisStore # deprecated
-    from langchain_community.storage import RedisStore
-    from langchain.memory import ConversationBufferMemory
-    from langchain_community.chat_message_histories import RedisChatMessageHistory
-    session_id = "session_id"
-    # Initialize the Redis storage
-    redis_url = "redis://localhost:6379/0"
-    #redis_store = RedisStore.from_url(redis_url)
-
-    redis_client = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
-    redis_store = RedisStore(client=redis_client)
-
-
-    # Use RedisChatMessageHistory to store the conversation history
-    chat_history = RedisChatMessageHistory(
-        session_id=session_id,  # uniquely identifies the session
-        url=redis_url  # or pass redis_client directly
-    )
-    # Use Redis to store the memory
-    memory = ConversationBufferMemory(
-        memory_key="chat_history",
-        return_messages=True,
-        chat_memory=chat_history  # or another compatible store
-    )
-    print(f"memory={memory}")
-
-
-    # redis_client = Redis.from_url("redis://localhost:6379")
-    # checkpointer = RedisSaver(connection=redis_client)
-    # checkpointer.setup()
-
-asyncio.run(main())

+ 0 - 128
test/test_utils.py

@@ -1,128 +0,0 @@
-
-
-import os
-import sys
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-
-from datetime import datetime
-from typing import List, Dict, Optional
-
-from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
-from langchain_openai import ChatOpenAI
-
-from test_config import config_handler
-from logger.loggering import server_logger
-
-
-def get_models():
-    """
-    获取模型,模型类型 默认为deepseek 、qwen
-    """
-    model_type = config_handler.get("model", "MODEL_TYPE")
-    server_logger.info(f"get_models -> model_type:{model_type}")
-    if model_type.upper() == "QWEN":
-        return get_deploy_qwen_models()
-    return get_deepseek_models()
-
-
-def get_deepseek_models():
-    """
-    获取DeepSeek模型
-    """
-    deepseek_model_server_url = config_handler.get("deepseek", "DEEPSEEK_SERVER_URL")
-    deepseek_chat_model_id = config_handler.get("deepseek", "DEEPSEEK_MODEL_ID")
-    deepseek_api_key = config_handler.get("deepseek", "DEEPSEEK_API_KEY")
-    server_logger.info(f"get_deepseek_models -> chat_model_id:{deepseek_chat_model_id},api_key:{deepseek_api_key}")
-    if deepseek_model_server_url is None or deepseek_chat_model_id is None or deepseek_api_key is None:
-        server_logger.error("Please set the environment variables: DEEPSEEK_SERVER_URL, DEEPSEEK_MODEL_ID, DEEPSEEK_API_KEY")
-        raise Exception("Please set the environment variables: DEEPSEEK_SERVER_URL, DEEPSEEK_MODEL_ID, DEEPSEEK_API_KEY")
-    # llm model
-    llm = ChatOpenAI(base_url=deepseek_model_server_url,
-                     api_key=deepseek_api_key,
-                     model=deepseek_chat_model_id,
-                     max_tokens=4096,
-                     temperature=0.3,
-                     top_p=0.7,
-                     extra_body={
-                         "enable_thinking": False  # 添加这个参数以避免报错
-                     })
-    # chat 大模型
-    chat = ChatOpenAI(base_url=deepseek_model_server_url,
-                      api_key=deepseek_api_key,
-                      model=deepseek_chat_model_id,
-                      max_tokens=4096,
-                      temperature=0.3,
-                      top_p=0.2,
-                      extra_body={
-                          "enable_thinking": False  # 添加这个参数以避免报错
-                      })
-    embed = None
-    return llm, chat, embed
-
-
-# Get the Qwen models
-def get_deploy_qwen_models():
-    """
-        Load the Qwen family of models via the ModelScope online Qwen3 API service
-    """
-    model_server_url = config_handler.get("qwen", "MODEL_SERVER_URL")
-    chat_model_id = config_handler.get("qwen", "CHAT_MODEL_ID")
-    api_key = config_handler.get("qwen", "API_KEY")
-    embedding_model_id = config_handler.get("qwen", "EMBED_MODEL_ID")
-    # temperature = os.getenv("CHAT_MODEL_TEMPERATURE")
-    server_logger.info(
-        f"get_qwen_chat_model -> chat_model_id:{chat_model_id},api_key:{api_key},embedding_model_id:{embedding_model_id}")
-    if model_server_url is None or chat_model_id is None or api_key is None:
-        server_logger.error("请设置环境变量: MODEL_SERVER_URL, CHAT_MODEL_ID, API_KEY")
-        raise Exception("请设置环境变量: MODEL_SERVER_URL, CHAT_MODEL_ID, API_KEY")
-
-    # llm 大模型
-    llm = ChatOpenAI(base_url=model_server_url,
-                     api_key=api_key,
-                     model=chat_model_id,
-                     max_tokens=1024,
-                     temperature=0.5,
-                     top_p=0.7,
-                     extra_body={
-                         "enable_thinking": False  # 添加这个参数以避免报错
-                     })
-    # chat 大模型
-    chat = ChatOpenAI(base_url=model_server_url,
-                      api_key=api_key,
-                      model=chat_model_id,
-                      max_tokens=1024,
-                      temperature=0.01,
-                      top_p=0.2,
-                      extra_body={
-                          "enable_thinking": False  # 添加这个参数以避免报错
-                      })
-
-    # embedding models: text-embedding-v3  text-embedding-v4
-    # from langchain_community.embeddings import DashScopeEmbeddings
-    embed = None  # DashScopeEmbeddings(model=embedding_model_id)
-    return llm, chat, embed
-
-
-def test_qwen_chat_model():
-    #  Get the models
-    llm, chat, embed = get_deploy_qwen_models()
-    example_query = "你好,你是谁?"
-    result = llm.invoke(input=example_query)
-    server_logger.info(f"result={result}")
-    print(f"result={result}")
-
-
-def test_deepseek_chat_model():
-    #  获取模型
-    llm, chat, embed = get_deepseek_models()
-    example_query = "你好,你是谁?"
-    result = llm.invoke(input=example_query)
-    server_logger.info(f"result={result}")
-    print(f"result={result}")
-
-
-
-
-if __name__ == "__main__":
-    test_qwen_chat_model()  # run
-    #test_deepseek_chat_model()

+ 0 - 32
test/test_yaml.py

@@ -1,32 +0,0 @@
-
-
-
-import os
-import sys
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-import yaml
-from logger.loggering import server_logger
-from utils.yaml_utils import system_prompt_config , get_business_scene_prompt
-
-
-# Test reading the yaml file
-def test_get_system_prompt():
-    system_prompt = system_prompt_config
-    server_logger.info(f"System prompt loaded, template: {system_prompt['template']}")
-
-
-# Get the task prompt
-def test_get_task_prompt():
-    #business_scene = "common_model_query"
-    #business_scene = "cattle_farm_common"
-    #business_scene = "cattle_farm_query"
-    business_scene = "cattle_farm_warning_plan"
-    #business_scene = "cattle_farm_warning_task_execute"
-    business_scene_enum , task_prompt_config = get_business_scene_prompt(business_scene)
-    server_logger.info(f"Task prompt loaded, business_scene_enum:{business_scene_enum},task_prompt: {task_prompt_config['task_prompt']}")
-    server_logger.info(f"Task prompt loaded, business_scene_enum:{business_scene_enum},template: {task_prompt_config['template']}")
-
-
-if __name__ == '__main__':
-    #test_get_system_prompt() # get the system prompt
-    test_get_task_prompt() # get the task prompt

+ 0 - 76
test/问题/流式推理打印推理过程.txt

@@ -1,76 +0,0 @@
-How can the code below print the agent's entire reasoning/execution process? The condition if 'messages' in event and event['messages']: is never satisfied,
-so event["messages"][-1].pretty_print() is never reached.
- # streaming execution
-                events = self.agent_executor.astream_events(
-                    {"messages": all_messages},
-                    config=config,
-                    stream_mode="values"
-                )
-
-                full_response = []
-                buffer = []
-                last_flush_time = time.time()
-
-                # process events as they stream in
-                async for event in events:
-                        server_logger.info(trace_id=trace_id , msg=f"Event: {event}")
-                        if 'messages' in event and event['messages']:
-                            event["messages"][-1].pretty_print()
-                   
-                    if 'chunk' in event['data'] and "on_chat_model_stream" in event['event']:
-                        chunk = event['data']['chunk'].content
-                        full_response.append(chunk)
-
-                        # buffer management strategy
-                        buffer.append(chunk)
-                        current_time = time.time()
-
-                        # flush the buffer when any of the following holds
-                        if (len(buffer) >= 3 or  # minimum chunk count reached
-                                (current_time - last_flush_time) > 0.5 or  # timeout
-                                any(chunk.endswith((c, f"{c} ")) for c in
-                                    ['.', '。', '!', '?', '\n', ';', ';'])):  # natural break point
-
-                            # merge and emit the buffered content
-                            combined = ''.join(buffer)
-                            yield combined
-
-                            # reset the buffer
-                            buffer.clear()
-                            last_flush_time = current_time
-
-P31216.T140704636194752 | 2025-08-26 09:58:09 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | REQUEST | Request started: POST http://localhost:8001/queryex/stream, start_time=1756173489.7427042
-P31216.T140704636194752 | 2025-08-26 09:58:09 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | system | config=FarmConfig(sessionId='D00032') input='查询10号信息' businessScene=None context=None supplementInfo=None
-P31216.T140704636194752 | 2025-08-26 09:58:09 | INFO     |            | system | [use the user's latest history for intent recognition]use_history_recognize_intent: True
-P31216.T140704636194752 | 2025-08-26 09:58:09 | INFO     |            | system | get_models -> model_type:deepseek
-P31216.T140704636194752 | 2025-08-26 09:58:09 | INFO     |            | system | get_deepseek_models -> chat_model_id:deepseek-chat,api_key:sk-479e4ea23d8e42bfb982a54137094a7b
-P31216.T140704636194752 | 2025-08-26 09:58:09 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | system | redis memory context history initialized=chat_memory=<langchain_community.chat_message_histories.redis.RedisChatMessageHistory object at 0x110ac96a0> return_messages=True memory_key='chat_history'
-P31216.T140704636194752 | 2025-08-26 09:58:09 | INFO     |            | system | Added user history for intent recognition, prompt config.system_prompt: Based on the provided examples, use the user's recent conversation context to recognize intent and precisely match the corresponding business-scene instruction.
-Always give priority to recent context semantics and the evolution of the user's intent; if the question matches any business scene in the examples, return the corresponding instruction; if no defined scene matches, return cattle_farm_common.
-Strictly observe: output only the instruction string, with no explanation, notes, or formatting attached.
-The user's current history context:
-None
-
-P31216.T140704636194752 | 2025-08-26 09:58:13 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | system | Intent recognition used: business_scene=cattle_farm_query
-P31216.T140704636194752 | 2025-08-26 09:58:13 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | system | business_scene_enum:cattle_farm_query Get prompt successfully.
-P31216.T140704636194752 | 2025-08-26 09:58:13 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | queryex | session_id:D00032, business_scene:cattle_farm_query,final_result_data_type:Markdown ,input_data: 查询10号信息
-P31216.T140704636194752 | 2025-08-26 09:58:16 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | system | Fixed-question intent recognition: input=查询10号信息, result=question_nine
-P31216.T140704636194752 | 2025-08-26 09:58:16 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | system | Configured fixed-question intent result list: recognize_intent_out_list=['question_one', 'question_two', 'question_three', 'question_four', 'question_five', 'question_six', 'question_seven', 'question_eight']
-P31216.T140704636194752 | 2025-08-26 09:58:16 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | system | fixed_flag=False,answer_result=查询10号信息
-P31216.T140704636194752 | 2025-08-26 09:58:16 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | system | config=FarmConfig(sessionId='D00032') input='查询10号信息' businessScene=None context=None supplementInfo=None
-P31216.T140704636194752 | 2025-08-26 09:58:16 | INFO     |            | system | System prompt system_prompt:-in-one AI assistant"", dedicated to providing comprehensive intelligent guidance.
-      Your advice must be practical, economical, and easy to act on, and must provide precise alerts and concrete solutions based on IoT data.
-          1. Output requirements:
-            - Output Markdown text by default
-            - If json is explicitly requested, return strictly well-formed json (with no extra explanatory text)
-          2. Content rules:
-            - Never include any sensitive information (keys/Token/API etc.)
-          3. Data-parsing rules
-            - Parameter note: the returned data type data_type defaults to text
-            - In the json structure returned by tools, data holds the required data fields, code is the error code, msg is the error message
-          4. json response template:
-              {{
-                 "field1": "value1",
-                 "field2": "value2",
-                 "field3": "value3"
-               }}

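A note on the question above: astream_events yields event dicts (keys like event, name, data), never the graph state, so 'messages' in event can never hold. To pretty-print each intermediate message, a sketch using LangGraph's astream with stream_mode="values", where each yielded item is the full state and does contain messages:

    async for state in agent_executor.astream(
        {"messages": all_messages},
        config=config,
        stream_mode="values",  # yields the full state after every step
    ):
        if state.get("messages"):
            state["messages"][-1].pretty_print()
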
+ 0 - 114
utils/request_tool.py

@@ -1,114 +0,0 @@
-# !/usr/bin/ python
-# -*- coding: utf-8 -*-
-'''
-@Project    : lq-agent-api
-@File       :request_tool.py
-@IDE        :Cursor
-@Author     : 
-@Date       :2025/08/21
-'''
-import json
-import requests
-import time
-from requests.exceptions import RequestException
-from base.config import config_handler
-from logger.loggering import server_logger
-from utils.common import handler_err
-
-
-class KnowledgeDifyAction:
-    """
-    与后端http对接交互类,支持 Token 认证和会话保持
-    """
-
-    def __init__(self, token=None, use_session=True):
-        """
-        初始化连接
-        :param host: 主机地址
-        :param port: 端口号
-        :param token: 认证令牌(可选)
-        :param use_session: 是否使用会话保持(默认True)
-        """
-        self.dify_server_url = config_handler.get("knowledge_dify","DIFY_SERVER_URL")
-        self.dify_api_key = config_handler.get("knowledge_dify","DIFY_API_KEY")
-        self.dify_dataset_id_list_str = config_handler.get("knowledge_dify","DIFY_DATASET_ID_LIST")
-        self.dify_dataset_id_list = [dataset_id for dataset_id in self.dify_dataset_id_list_str.split(",")]
-        self.token = self.dify_api_key
-        self.use_session = use_session
-       
-        # Create a session object (if session keep-alive is needed)
-        if self.use_session:
-            self.session = requests.Session()
-            # If a token was provided, add it to the session headers
-            if self.token:
-                self.session.headers.update({"Authorization": f"Bearer {self.token}"})
-
-    
-
-
-    def get_request_knowledge_retrieve_list(self , params=None, headers=None, timeout=20, trace_id=""):
-        """
-           dify 批量知识库检索
-            :param params: 请求参数(可选)
-            :param headers: 额外请求头(可选)
-            :param timeout: 超时时间(秒,默认5秒)
-            :param trace_id: 操作id
-            :return: 响应内容(json格式data 内容)
-        """
-        server_logger.info(trace_id=trace_id, msg=f"开始执行 dify 批量知识库检索,dify_dataset_id_list:{self.dify_dataset_id_list}")
-        all_record_list = []
-        for dataset_id in self.dify_dataset_id_list:
-            response = self.get_request_knowledge_retrieve(trace_id=trace_id ,dataset_id=dataset_id , params=params , headers=headers , timeout=timeout)
-             # 判断返回结果 ,如果 存在code 则返回错误
-            if "code" in response:
-                error_msg = f"检索知识库内容失败,详细信息:{response['status']-{response["code"]}-{response['message']}}"
-                server_logger.info(f"dataset_id:{dataset_id}, {error_msg}", trace_id=trace_id)
-                continue
-            # 把 所有 检索列表 合并 为一个 list
-            all_record_list += response["records"]
-
-        return all_record_list
-
-
-
-
-    def get_request_knowledge_retrieve(self , dataset_id , params=None, headers=None, timeout=20, trace_id=""):
-        """
-            dify 知识检索 -发送HTTP请求
-            :param params: 请求参数(可选)
-            :param headers: 额外请求头(可选)
-            :param timeout: 超时时间(秒,默认5秒)
-            :param trace_id: 操作id
-            :return: 响应内容(json格式data 内容)
-        """
-        dify_dataset_url = config_handler.get("knowledge_dify","DIFY_DATASET_URL")
-        # 替换 知识库id 配置
-        dify_dataset_url = dify_dataset_url.format(dataset_id=dataset_id)
-         # 准备请求头
-        request_headers = {}
-        request_headers["Content-Type"] = "application/json"
-        if self.token:  # 非会话模式需单独添加token
-            request_headers["Authorization"] = f"Bearer {self.token}"
-        if headers:  # 合并额外请求头
-            request_headers.update(headers)
-        request_url = "{dify_server_url}{dify_dataset_url}".format(
-            dify_server_url=self.dify_server_url, dify_dataset_url=dify_dataset_url)
-        server_logger.info(f"{request_url}, header: {request_headers}, params={params}", trace_id=trace_id)
-
-        try:
-            response = requests.post(
-                                request_url,
-                                json=params,
-                                headers=request_headers,
-                                timeout=timeout
-                            )
-            # Check the HTTP status code
-            response_data = response.json()
-            server_logger.info(f"url: {request_url}, dataset_id:{dataset_id}, response: {response_data}", trace_id=trace_id)
-            return response_data
-        except RequestException as e:
-            handler_err(server_logger, trace_id=trace_id, err=e, err_name='get_request_knowledge_retrieve')
-            #raise e
-        return {}
-    
-    

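Note: the deleted class above built a requests.Session (carrying the Authorization header) but then called requests.post directly, so the session was never used. A minimal sketch of the intended pattern, with a placeholder token:

    import requests

    session = requests.Session()
    session.headers.update({"Authorization": "Bearer <token>"})  # placeholder, not a real token

    def post_json(url: str, payload: dict, timeout: int = 20) -> dict:
        """POST via the shared session so the auth header and connections are reused."""
        resp = session.post(url, json=payload, timeout=timeout)
        resp.raise_for_status()
        return resp.json()
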
+ 1 - 76
utils/tool_utils.py

@@ -2,7 +2,7 @@ import time
 from math import log
 import os
 from dotenv import load_dotenv
-from enums.common_enums import BusinessSceneEnum, ErrorCodeEnum, UserRoleEnum
+from core.common_enums import ErrorCodeEnum
 from functools import wraps
 
 from logger.loggering import server_logger
@@ -23,20 +23,11 @@ def verify_param(param: dict):
     """
     input_data = param.get("input")
     session_id = param.get("config").get("session_id")
-    businessScene = param.get("businessScene")
     if input_data is None:
         raise ValueError(ErrorCodeEnum.INPUT_INFO_EMPTY.__str__)
     if session_id is None:
         raise ValueError(ErrorCodeEnum.SESSION_ID_EMPTY.__str__)
-    # Whether the default common model query may be used; defaults to False
-    use_default_common_model_query = os.environ.get("USE_DEFAULT_COMMON_MODEL_QUERY" , False)
-    server_logger.info(f"Use default common model query: {use_default_common_model_query}")
     
-    if not use_default_common_model_query:
-        if businessScene is None:
-            raise ValueError(ErrorCodeEnum.BUSINSESS_SCENE_EMPTY.__str__)
-        if not BusinessSceneEnum.get_item_by_code(param.get('businessScene')):
-            raise ValueError(ErrorCodeEnum.BUSINSESS_SCENE_ERROR.__str__)
 
 
 
@@ -48,69 +39,3 @@ def get_system_prompt() -> str:
     server_logger.info(f"获取系统提示语: {system_prompt}")
     return str(system_prompt)
 
-
-
-def get_business_scene_prompt(business_scene):
-    """
-        Get the prompt for a business scene
-    """
-    # Default to the common query prompt
-    business_scene_enum = BusinessSceneEnum.COMMON_MODEL_QUERY
-    prompt_file = business_scene_enum.prompt_file
-    # Whether the default common model query may be used; defaults to False
-    use_default_common_model_query = os.environ.get("USE_DEFAULT_COMMON_MODEL_QUERY" , False)
-    if not business_scene is None:
-        business_scene_enum = BusinessSceneEnum.get_item_by_code(business_scene)
-        if not business_scene_enum:
-            raise ValueError("enum value not found")
-        if business_scene_enum.prompt_file is None:
-            raise ValueError("business scene does not exist")
-        prompt_file = business_scene_enum.prompt_file
-    
-    prompt_file = os.path.join(current_dir , '../', 'config', 'prompt' , prompt_file)
-    server_logger.info(f"Business scene prompt file: {prompt_file}")
-    if not os.path.exists(prompt_file):
-        raise ValueError("business scene does not exist")
-    
-    try:
-        with open(prompt_file, 'r', encoding='utf-8') as f:
-          return business_scene_enum , '\n'.join(f.readlines())
-    except Exception as e:
-        handler_err(server_logger, e,err_name="get_business_scene_prompt")
-        server_logger.error(f"Failed to get business scene prompt: {e}")
-        raise e
-
-
-def get_fixed_problem_answer_txt_content(file_name: str):
-    """
-        获取固定问题答案内容
-    """
-    file_name = file_name+".txt"
-    answer_txt_file = os.path.join(current_dir , '../', 'config', 'fixed_answer' , file_name)
-    server_logger.info(f"固定回答文本内容: {answer_txt_file}")
-    if not os.path.exists(answer_txt_file):
-        raise ValueError("固定回答文本不存在")
-    
-    try:
-        result_list = []
-        with open(answer_txt_file, 'r', encoding='utf-8') as f:
-           result_list=f.readlines()
-        return "".join(result_list)
-    except Exception as e:
-        handler_err(server_logger, e,err_name="get_fixed_problem_answer_txt_content")
-        server_logger.error(f"获取固定回答文本失败: {e}")
-        raise e
-
-
-
-
-def verify_user_role(user_role: str):
-    """
-        验证用户角色
-            普通用户 common  ,不能检索查询知识库
-            租户用户 tenant  ,只有租户才能检索查询知识库
-    """
-    if user_role in [UserRoleEnum.TENANT.code]:
-        return True
-    
-    return False 

+ 0 - 97
utils/yaml_utils.py

@@ -15,7 +15,6 @@ from logger.loggering import server_logger
 
 import os
 from dotenv import load_dotenv
-from enums.common_enums import BusinessSceneEnum, ErrorCodeEnum
 from functools import wraps
 
 from logger.loggering import server_logger
@@ -30,69 +29,6 @@ conf_file_path = os.path.join(current_dir , '../',  '.env')
 
 
 
-def get_fixed_problem_answer() -> dict:
-    """
-        固定问题回答
-    """
-     # 构建文件路径 判断文件是否存在
-    yaml_file = get_yaml_file_path("fixed_problem_answer.yaml")
-    
-    try:
-        with open(yaml_file, 'r', encoding='utf-8') as f:
-            prompt_config = yaml.safe_load(f)
-        # 验证必需字段
-        #validate_prompt_config(prompt_config, prompt_name)
-        server_logger.info(f"成功加载[固定问题]回答系统配置.fixed_problem_answer: {prompt_config["fixed_problem_answer"]}")
-        return prompt_config
-        
-    except Exception as e:
-        server_logger.error(f"加载[固定问题]回答fixed_problem_answer文件失败: {yaml_file}, 错误: {str(e)}")
-        raise
-
-
-
-def get_intent_prompt() -> dict:
-    """
-        获取意图识别 系统提示语
-    """
-     # 构建文件路径 判断文件是否存在
-    yaml_file = get_yaml_file_path("intent_prompt.yaml")
-    
-    try:
-        with open(yaml_file, 'r', encoding='utf-8') as f:
-            prompt_config = yaml.safe_load(f)
-        # 验证必需字段
-        #validate_prompt_config(prompt_config, prompt_name)
-        server_logger.info(f"成功加载[意图识别]系统.system_prompt配置: {prompt_config["system_prompt"]}")
-        server_logger.info(f"成功加载[意图识别]系统配置.examples: {prompt_config["intent_examples"]}")
-        return prompt_config
-        
-    except Exception as e:
-        server_logger.error(f"加载意图识别intent_prompt文件失败: {yaml_file}, 错误: {str(e)}")
-        raise
-
-
-
-def get_fixed_question_intent_prompt() -> dict:
-    """
-        获取 固定问题意图识别 系统提示语
-    """
-     # 构建文件路径 判断文件是否存在
-    yaml_file = get_yaml_file_path("fixed_intent_prompt.yaml")
-    
-    try:
-        with open(yaml_file, 'r', encoding='utf-8') as f:
-            prompt_config = yaml.safe_load(f)
-        # 验证必需字段
-        #validate_prompt_config(prompt_config, prompt_name)
-        server_logger.info(f"成功加载[固定问题意图识别]系统.system_prompt配置: {prompt_config["system_prompt"]}")
-        server_logger.info(f"成功加载[固定问题意图识别]系统配置.examples: {prompt_config["fixed_problem_answer"]}")
-        return prompt_config
-        
-    except Exception as e:
-        server_logger.error(f"加载意图识别fixed_intent_prompt文件失败: {yaml_file}, 错误: {str(e)}")
-        raise
-
 
 def get_system_prompt() -> dict:
     """
@@ -116,39 +52,6 @@ def get_system_prompt() -> dict:
 
 
 
-
-def get_business_scene_prompt(trace_id, business_scene) -> tuple[BusinessSceneEnum , dict]:
-    """
-        获取业务场景的提示语
-    """
-    # 默认公共查询提示语
-    business_scene_enum = BusinessSceneEnum.COMMON_MODEL_QUERY
-    prompt_file = business_scene_enum.prompt_file
-    if not business_scene is None:
-        # 2025-07-25 修改 如果未找到 返回默认值通用场景
-        business_scene_enum = BusinessSceneEnum.get_item_by_code_def_val(business_scene , BusinessSceneEnum.CATTLE_FARM_COMMMON)
-        if not business_scene_enum:
-            raise ValueError("未找到枚举值")
-        if business_scene_enum.prompt_file is None:
-            raise ValueError("业务场景不存在")
-        prompt_file = business_scene_enum.prompt_file
-    
-    # 构建文件路径 判断文件是否存在
-    yaml_file = get_yaml_file_path(prompt_file)
-    
-    try:
-        with open(yaml_file, 'r', encoding='utf-8') as f:
-            prompt_config = yaml.safe_load(f)
-            server_logger.info(trace_id=trace_id , msg=f"business_scene_enum:{business_scene_enum.code} Get prompt successfully.")
-            return business_scene_enum , prompt_config
-    except Exception as e:
-        handler_err(server_logger, e, trace_id=trace_id, err_name="get_business_scene_prompt")
-        server_logger.error(trace_id=trace_id , msg=f"获取业务场景任务提示语失败: {e}")
-        raise e
-
-
-
-
 def get_yaml_file_path(file_name: str) -> str:
     """
         Get the path to a yaml file

+ 1 - 3
views/__init__.py

@@ -14,9 +14,7 @@ from contextvars import ContextVar
 
 from fastapi import FastAPI, APIRouter
 
-from function.load_mcp_server import LoadMcpServer
 
-#mcp_server = LoadMcpServer()
 mcp_server = None
 
 @asynccontextmanager
@@ -29,7 +27,7 @@ async def lifespan(app: FastAPI):
     if mcp_server and mcp_server.cleanup:
         await mcp_server.close()
 
-cattle_router = APIRouter(prefix="/queryex", tags=["agent"])
+test_router = APIRouter(prefix="/test", tags=["agent"])
 current_operation_id: ContextVar[str] = ContextVar("operation_id", default=str(uuid.uuid4()))
 
 

+ 0 - 338
views/cattle_farm_views.py

@@ -1,338 +0,0 @@
-# !/usr/bin/ python
-# -*- coding: utf-8 -*-
-'''
-@Project    : lq-agent-api
-@File       :cattle_farm_views.py
-@IDE        :PyCharm
-@Author     :
-@Date       :2025/7/10 17:32
-'''
-import json
-from typing import Optional
-
-from fastapi import Depends, Response, Header
-from sse_starlette import EventSourceResponse
-from starlette.responses import JSONResponse
-
-from agent.agent_mcp import client
-from generate.model_generate import XiwuzcModelGenerateClient
-from logger.loggering import server_logger
-from schemas.cattle_farm import CattleFarm
-from utils import yaml_utils
-from utils.common import return_json, handler_err
-from views import cattle_router, get_operation_id
-from agent.intent import intent_identify_client
-
-
-
-def get_token(authorization: Optional[str] = Header(default=None)):
-    """提取 Bearer Token (非必填)"""
-    if authorization is None:
-        return None
-
-    scheme, _, token = authorization.partition(" ")
-    return token
-
-
-def get_tenant_id(tenant_id: Optional[str] | None = Header(None, alias="X-lq-TENANT-ID")):
-    """处理租户ID"""
-    return tenant_id
-
-
-# Routes
-
-@cattle_router.post("/chat", response_model=CattleFarm)
-async def chat_endpoint(
-        param: CattleFarm,
-        token: str = Depends(get_token),
-        tenant_id: str = Depends(get_tenant_id),
-        trace_id: str = Depends(get_operation_id)):
-    """
-    根据场景获取智能体反馈
-    """
-
-    try:
-        server_logger.info(trace_id=trace_id, msg=f"{param}")
-        # 验证参数
-
-        # 从字典中获取input
-        input_data = param.input
-        session_id = param.config.sessionId
-        business_scene = param.businessScene
-        context = param.context
-        supplementInfo = param.supplementInfo
-        header_info = {
-            "token": token,
-            "tenantId": tenant_id,
-        }
-        # If business_scene is None, let the LLM recognize the intent
-        if business_scene is None:
-            business_scene = await intent_identify_client.recognize_intent(trace_id=trace_id , config=param.config , input=input_data)
-            server_logger.info(trace_id=trace_id, msg=f"Intent recognition used: business_scene={business_scene}")
-
-        business_scene_enum, task_prompt_info = yaml_utils.get_business_scene_prompt(trace_id=trace_id, business_scene=business_scene)
-        final_result_data_type = task_prompt_info["final_result_data_type"]       
-        server_logger.info(trace_id=trace_id, msg=f"session_id:{session_id}, business_scene:{business_scene},final_result_data_type:{final_result_data_type} ,input_data: {input_data}",
-                           log_type="queryex")
-        # Execute the agent query
-        output = await client.handle_query(trace_id , business_scene , task_prompt_info, input_data, context, supplementInfo, header_info , param.config)
-        # Log the result
-        server_logger.debug(trace_id=trace_id, msg=f"[result]: {output}", log_type="queryex")
-        # Return the response as a dict
-        return JSONResponse(
-            return_json(business_scene=business_scene, data={"output": output}, data_type=final_result_data_type, trace_id=trace_id))
-    except ValueError as err:
-        handler_err(server_logger, trace_id=trace_id, err=err, err_name="queryex")
-        return JSONResponse(return_json(code=100500, msg=f"{err}", trace_id=trace_id))
-
-    except Exception as err:
-        handler_err(server_logger, trace_id=trace_id, err=err, err_name="queryex")
-        return JSONResponse(return_json(code=100500, msg=f"{err}", trace_id=trace_id))
-
-
-@cattle_router.post("/stream", response_class=Response)
-async def chat_agent(param: CattleFarm,
-                     token: str = Depends(get_token),
-                     tenant_id: str = Depends(get_tenant_id),
-                     trace_id: str = Depends(get_operation_id)):
-    """
-    Get the agent's response for a given scene (SSE streaming response)
-    """
-    try:
-        server_logger.info(trace_id=trace_id, msg=f"{param}")
-
-       
-       
-
-        # Extract parameters
-        input_data = param.input
-        session_id = param.config.sessionId
-        user_role = param.config.userRole
-        business_scene = param.businessScene
-        context = param.context
-        supplementInfo = param.supplementInfo
-        header_info = {
-            "token": token,
-            "tenantId": tenant_id,
-        }
-        # If business_scene is None, let the LLM recognize the intent
-        
-        # Get the task prompt info
-        from enums.common_enums import BusinessSceneEnum
-        business_scene_enum, task_prompt_info = BusinessSceneEnum.COMMON_MODEL_QUERY , {"task_prompt": ""}
-        final_result_data_type = "text"      
-        server_logger.info(trace_id=trace_id, msg=f"session_id:{session_id}, business_scene:{business_scene},final_result_data_type:{final_result_data_type} ,input_data: {input_data}",
-                           log_type="queryex")
-        
-       
-        server_logger.info(trace_id=trace_id, msg=f"{param}")
-        # Create the SSE streaming response
-        async def event_generator():
-            try:
-                # Stream the query
-                async for chunk in client.handle_query_stream(
-                        trace_id=trace_id,
-                        config_param=param.config,
-                        business_scene=business_scene,
-                        task_prompt_info=task_prompt_info,
-                        input_query=input_data,
-                        context=context,
-                        supplement_info=supplementInfo,
-                        header_info=header_info
-                ):
-                    server_logger.debug(trace_id=trace_id, msg=f"{chunk}")
-                    # Send a data chunk
-                    yield {
-                        "event": "message",
-                        "data": json.dumps({
-                            "code": 0,
-                            "output": chunk,
-                            "completed": False,
-                            "trace_id": trace_id,
-                            "dataType": final_result_data_type,
-                            "business_scene": business_scene,
-                        }, ensure_ascii=False)
-                    }
-                # Fetch cached data
-                result_data = await client.get_redis_result_cache_data(trace_id=trace_id)
-                # Send the end event
-                yield {
-                    "event": "message_end",
-                    "data": json.dumps({
-                        "completed": True,
-                        "message": json.dumps(result_data, ensure_ascii=False),
-                        "code": 0,
-                        "trace_id": trace_id,
-                        "dataType": "text",
-                        "business_scene": business_scene,
-                    }, ensure_ascii=False),
-                }
-            except Exception as e:
-                # Error handling
-                yield {
-                    "event": "error",
-                    "data": json.dumps({
-                        "trace_id": trace_id,
-                        "message": str(e),
-                        "code": 1,
-                        "dataType": "text",
-                        "business_scene": business_scene,
-                    }, ensure_ascii=False)
-                }
-            finally:
-                # No need to close the client; it is a singleton
-                pass
-
-        # Return the SSE response
-        return EventSourceResponse(
-            event_generator(),
-            headers={
-                "Cache-Control": "no-cache",
-                "Connection": "keep-alive"
-            }
-        )
-
-    except Exception as err:
-        # Initial error handling
-        handler_err(server_logger, trace_id=trace_id, err=err, err_name="queryex")
-        return JSONResponse(
-            return_json(code=1, msg=f"{err}", trace_id=trace_id),
-            status_code=500
-        )
-
-
-@cattle_router.post("/generate/ai_stream", response_class=Response)
-def chat_stream(param: CattleFarm,
-        token: str = Depends(get_token),
-        tenant_id: str = Depends(get_tenant_id),
-        trace_id: str = Depends(get_operation_id)):
-    try:
-        server_logger.info(trace_id=trace_id, msg=f"{param}")
-
-        # Extract parameters
-        input_data = param.input
-        session_id = param.config.sessionId
-        business_scene = param.businessScene
-        context = param.context
-        supplementInfo = param.supplementInfo
-        header_info = {
-            "token": token,
-            "tenantId": tenant_id,
-        }
-        # If business_scene is None, let the LLM recognize the intent
-        if business_scene is None:
-            business_scene = intent_identify_client.recognize_intent(input_data)
-            server_logger.info(trace_id=trace_id, msg=f"Intent recognition used: business_scene={business_scene}")
-
-        # Get the system prompt
-        business_scene_enum, task_prompt_info = yaml_utils.get_business_scene_prompt(trace_id=trace_id, business_scene=business_scene)
-        server_logger.info(trace_id=trace_id, msg=f"session_id:{session_id}, business_scene:{business_scene} , business_scene_enum:{business_scene_enum} ,input_data: {input_data}",
-                           log_type="queryex")
-        xwzc_generate_client = XiwuzcModelGenerateClient()
-        # Create the SSE streaming response
-        async def event_generator():
-            try:
-                # Stream the query
-                for chunk in xwzc_generate_client.get_model_generate_stream(task_prompt_info, session_id, input_data,
-                                                                            context, supplementInfo):
-                    # Send a data chunk
-                    yield {
-                        "event": "message",
-                        "data": json.dumps({
-                            "output": chunk,
-                            "completed": False,
-                            "trace_id": trace_id,
-                            "dataType": business_scene_enum.data_type
-                        })
-                    }
-
-                # Send the end event
-                yield {
-                    "event": "message_end",
-                    "data": json.dumps({
-                        "completed": True,
-                        "message": "Stream completed",
-                        "code": 0,
-                        "trace_id": trace_id
-                    }),
-
-                }
-            except Exception as e:
-                # Error handling
-                yield {
-                    "event": "error",
-                    "data": json.dumps({
-                        "trace_id": trace_id,
-                        "msg": str(e),
-                        "code": 1,
-                        "dataType": "text"
-                    })
-                }
-
-        # Return the SSE response
-        return EventSourceResponse(
-            event_generator(),
-            headers={
-                "Cache-Control": "no-cache",
-                "Connection": "keep-alive"
-            }
-        )
-
-    except Exception as err:
-        # Initial error handling
-        handler_err(server_logger, trace_id=trace_id, err=err, err_name="queryex")
-        return JSONResponse(
-            return_json(code=1, msg=f"{err}", trace_id=trace_id),
-            status_code=500
-        )
-
-
-
-
-@cattle_router.post("/generate/tools/execute", response_model=CattleFarm)
-async def chat_generate_tools_endpoint(
-        param: CattleFarm,
-        token: str = Depends(get_token),
-        tenant_id: str = Depends(get_tenant_id),
-        trace_id: str = Depends(get_operation_id)):
-    """
-        Tool calling
-    """
-
-    try:
-        server_logger.info(trace_id=trace_id, msg=f"{param}")
-        # Validate parameters
-
-        # Extract input from the dict
-        input_data = param.input
-        session_id = param.config.sessionId
-        business_scene = param.businessScene
-        context = param.context
-        supplementInfo = param.supplementInfo
-        header_info = {
-            "token": token,
-            "tenantId": tenant_id,
-        }
-        # If business_scene is None, let the LLM recognize the intent
-        if business_scene is None:
-            business_scene = intent_identify_client.recognize_intent(input_data)
-            server_logger.info(trace_id=trace_id, msg=f"Intent recognition used: business_scene={business_scene}")
-
-        business_scene_enum, task_prompt_info = yaml_utils.get_business_scene_prompt(trace_id=trace_id, business_scene=business_scene)
-        server_logger.info(trace_id=trace_id, msg=f"session_id:{session_id}, business_scene:{business_scene} , business_scene_enum:{business_scene_enum} ,input_data: {input_data}",
-                           log_type="queryex/tools2")
-        xwzc_generate_client = XiwuzcModelGenerateClient()
-        # Execute the tool call
-        output = await xwzc_generate_client.get_model_tools_call(trace_id, session_id, task_prompt_info, input_data, context, supplementInfo, header_info)
-        # Log the result
-        server_logger.debug(trace_id=trace_id, msg=f"[result]: {output}", log_type="queryex/tools2")
-        # Return the response as a dict
-        return JSONResponse(
-            return_json(data={"output": output}, data_type=business_scene_enum.data_type, trace_id=trace_id))
-    except ValueError as err:
-        handler_err(server_logger, trace_id=trace_id, err=err, err_name="queryex/tools2")
-        return JSONResponse(return_json(code=100500, msg=f"{err}", trace_id=trace_id))
-
-    except Exception as err:
-        handler_err(server_logger, trace_id=trace_id, err=err, err_name="queryex/tools2")
-        return JSONResponse(return_json(code=100500, msg=f"{err}", trace_id=trace_id))

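Note: the deleted routes above and the new test routes below share one SSE pattern: an async generator yielding {"event": ..., "data": json.dumps(...)} dicts wrapped in an EventSourceResponse. A stripped-down sketch of that pattern (assuming fastapi and sse_starlette, as imported in both files):

    import json
    from fastapi import APIRouter
    from sse_starlette import EventSourceResponse

    router = APIRouter()

    @router.post("/demo/stream")
    async def demo_stream():
        async def events():
            for chunk in ("hello", "world"):
                yield {"event": "message", "data": json.dumps({"output": chunk})}
            yield {"event": "message_end", "data": json.dumps({"completed": True})}
        return EventSourceResponse(events())
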
+ 0 - 71
views/fixed_answer.py

@@ -1,71 +0,0 @@
-
-
-
-import asyncio
-from sse_starlette import EventSourceResponse
-from utils.yaml_utils import fixed_question_intent_config
-from logger.loggering import server_logger
-from agent.fixed_intent import fixed_intent_identify_client
-from utils.tool_utils import get_fixed_problem_answer_txt_content ,verify_user_role
-
-
-
-
-def get_fixed_problem_answer_txt(trace_id, input , user_role):
-    """
-        Get a fixed answer
-        
-    """
-    # Verify the user role; only tenants may query fixed questions
-    flag_tenant = verify_user_role(user_role)
-    if not flag_tenant:
-        # Common users return immediately
-        return False , input
-
-    result = fixed_intent_identify_client.recognize_intent(input)
-    server_logger.info(trace_id=trace_id, msg=f"固定问题意图识别系统: input={input}, result={result}")
-    # 获取固定问题 配置列表,判断意图识别是否在配置列表中
-    if result not in get_fixed_problem_answer_recognize_out_list(trace_id=trace_id):
-        # 不在 配置列表中
-        return False , input
-    
-    answer_result = get_fixed_problem_answer_txt_content(result)
-    return True , answer_result
-
-
-
-def get_fixed_problem_answer_recognize_out_list(trace_id: str):
-    """
-        Get the configured list of fixed-question intent-recognition results
-        e.g.:
-        [
-            "question_1",
-            "question_2",
-            "question_3",
-            "question_4",
-            "question_5",
-            "question_6",
-            "question_7",
-            "question_8",
-            "question_9",
-        ]
-    """
-    fixed_problem_answer_list = fixed_question_intent_config["fixed_problem_answer"]
-    recognize_intent_out_list = list(map(lambda x: x["out"], fixed_problem_answer_list))
-    server_logger.info(trace_id=trace_id, msg=f"配置固定问题意图识别结果列表: recognize_intent_out_list={recognize_intent_out_list}")
-    return recognize_intent_out_list
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-

+ 255 - 0
views/test_views.py

@@ -0,0 +1,255 @@
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :test_views.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/10 17:32
+'''
+import json
+from typing import Optional
+
+from fastapi import Depends, Response, Header
+from sse_starlette import EventSourceResponse
+from starlette.responses import JSONResponse
+
+from agent.test_agent import test_agent_client
+from agent.generate.model_generate import test_generate_model_client
+from logger.loggering import server_logger
+from schemas.test_schemas import TestForm
+from utils.common import return_json, handler_err
+from views import test_router, get_operation_id
+
+
+
+
+@test_router.post("/generate/chat", response_model=TestForm)
+async def generate_chat_endpoint(
+        param: TestForm,
+        trace_id: str = Depends(get_operation_id)):
+    """
+        Generative model endpoint
+    """
+    try:
+        server_logger.info(trace_id=trace_id, msg=f"{param}")
+
+        # Extract input from the dict
+        input_query = param.input
+        session_id = param.config.session_id
+        context = param.context
+        header_info = {
+        }
+        task_prompt_info = {"task_prompt": ""}
+        output = test_generate_model_client.get_model_generate_invoke(trace_id , task_prompt_info, 
+                                                                                 input_query, context)
+        # Log the result
+        server_logger.debug(trace_id=trace_id, msg=f"[result]: {output}", log_type="generate/chat")
+        # Return the response as a dict
+        return JSONResponse(
+            return_json(data={"output": output}, data_type="text", trace_id=trace_id))
+
+    except ValueError as err:
+        handler_err(server_logger, trace_id=trace_id, err=err, err_name="generate/stream")
+        return JSONResponse(return_json(code=100500, msg=f"{err}", trace_id=trace_id))
+
+    except Exception as err:
+        handler_err(server_logger, trace_id=trace_id, err=err, err_name="generate/stream")
+        return JSONResponse(return_json(code=100500, msg=f"{err}", trace_id=trace_id))
+
+
+@test_router.post("/generate/stream", response_model=TestForm)
+async def generate_stream_endpoint(
+        param: TestForm,
+        trace_id: str = Depends(get_operation_id)):
+    """
+        生成类模型
+    """
+    try:
+        server_logger.info(trace_id=trace_id, msg=f"{param}")
+
+        # 从字典中获取input
+        input_query = param.input
+        session_id = param.config.session_id
+        context = param.context
+        header_info = {
+        }
+        task_prompt_info = {"task_prompt": ""}
+        # 创建 SSE 流式响应
+        async def event_generator():
+            try:
+                # 流式处理查询 trace_id, task_prompt_info: dict, input_query, context=None
+                for chunk in test_generate_model_client.get_model_generate_stream(trace_id , task_prompt_info, 
+                                                                                 input_query, context):
+                    # 发送数据块
+                    yield {
+                        "event": "message",
+                        "data": json.dumps({
+                            "output": chunk,
+                            "completed": False,
+                        }, ensure_ascii=False)
+                    }
+                # Fetch cached result data (placeholder: nothing is cached for the generate model)
+                result_data = {}
+                # Emit the end-of-stream event
+                yield {
+                    "event": "message_end",
+                    "data": json.dumps({
+                        "completed": True,
+                        "message": json.dumps(result_data, ensure_ascii=False),
+                        "code": 0,
+                        "trace_id": trace_id,
+                    }, ensure_ascii=False),
+                }
+            except Exception as e:
+                # Error handling: surface the failure as an SSE error event
+                yield {
+                    "event": "error",
+                    "data": json.dumps({
+                        "trace_id": trace_id,
+                        "message": str(e),
+                        "code": 1
+                    }, ensure_ascii=False)
+                }
+            finally:
+                # No cleanup needed: the client is a singleton
+                pass
+        # Return the SSE response
+        return EventSourceResponse(
+            event_generator(),
+            headers={
+                "Cache-Control": "no-cache",
+                "Connection": "keep-alive"
+            }
+        )
+    except ValueError as err:
+        handler_err(server_logger, trace_id=trace_id, err=err, err_name="generate/stream")
+        return JSONResponse(return_json(code=100500, msg=f"{err}", trace_id=trace_id))
+
+    except Exception as err:
+        handler_err(server_logger, trace_id=trace_id, err=err, err_name="generate/stream")
+        return JSONResponse(return_json(code=100500, msg=f"{err}", trace_id=trace_id))
+
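EventSourceResponse frames each yielded dict as event:/data: lines on the wire, so the stream above can be consumed without an SSE library; a minimal sketch, again assuming the local test server and the requests package:

    import json
    import requests

    payload = {"config": {"session_id": "111"}, "input": "你好,我叫小红,你叫什么?"}
    with requests.post("http://localhost:8001/test/generate/stream", json=payload, stream=True) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            if line and line.startswith("data:"):
                # "message" chunks carry {"output": ..., "completed": false};
                # the final "message_end" frame carries {"completed": true, ...}
                print(json.loads(line[len("data:"):].strip()))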
+# Agent routes
+
+@test_router.post("/agent/chat", response_model=TestForm)
+async def chat_endpoint(
+        param: TestForm,
+        trace_id: str = Depends(get_operation_id)):
+    """
+    根据场景获取智能体反馈
+    """
+
+    try:
+        server_logger.info(trace_id=trace_id, msg=f"{param}")
+        # 验证参数
+
+        # 从字典中获取input
+        input_data = param.input
+        session_id = param.config.session_id
+        context = param.context
+        header_info = {
+        }
+        task_prompt_info = {"task_prompt": ""}
+  
+        # stream 流式执行
+        output = await test_agent_client.handle_query(trace_id  , task_prompt_info, input_data, context, param.config)
+        # 直接执行
+        server_logger.debug(trace_id=trace_id, msg=f"【result】: {output}", log_type="agent/chat")
+        # 返回字典格式的响应
+        return JSONResponse(
+            return_json(data={"output": output}, data_type="text", trace_id=trace_id))
+    except ValueError as err:
+        handler_err(server_logger, trace_id=trace_id, err=err, err_name="agent/chat")
+        return JSONResponse(return_json(code=100500, msg=f"{err}", trace_id=trace_id))
+
+    except Exception as err:
+        handler_err(server_logger, trace_id=trace_id, err=err, err_name="agent/chat")
+        return JSONResponse(return_json(code=100500, msg=f"{err}", trace_id=trace_id))
+
+
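All four handlers wrap their payloads with utils.common.return_json. Its definition is not part of this commit, but the call sites imply an envelope roughly like this sketch (field names and defaults are assumptions):

    # Hypothetical reconstruction from the call sites; the real helper may differ.
    def return_json(data=None, data_type=None, code=0, msg="success", trace_id=None):
        """Standard response envelope: code 0 on success, 100500 on handled errors."""
        return {
            "code": code,
            "msg": msg,
            "data": data,
            "data_type": data_type,
            "trace_id": trace_id,
        }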
+@test_router.post("/agent/stream", response_class=Response)
+async def chat_agent_stream(param: TestForm,
+                     trace_id: str = Depends(get_operation_id)):
+    """
+    根据场景获取智能体反馈 (SSE流式响应)
+    """
+    try:
+        server_logger.info(trace_id=trace_id, msg=f"{param}")
+
+       
+        # 提取参数
+        input_data = param.input
+        context = param.context
+        header_info = {
+          
+        }
+        task_prompt_info = {"task_prompt": ""}
+          # 如果business_scene为None,则使用大模型进行意图识别
+
+        server_logger.info(trace_id=trace_id, msg=f"{param}")
+        # 创建 SSE 流式响应
+        async def event_generator():
+            try:
+                # Stream the query through the agent
+                async for chunk in test_agent_client.handle_query_stream(
+                        trace_id=trace_id,
+                        config_param=param.config,
+                        task_prompt_info=task_prompt_info,
+                        input_query=input_data,
+                        context=context,
+                        header_info=header_info
+                ):
+                    server_logger.debug(trace_id=trace_id, msg=f"{chunk}")
+                    # Emit a data chunk
+                    yield {
+                        "event": "message",
+                        "data": json.dumps({
+                            "code": 0,
+                            "output": chunk,
+                            "completed": False,
+                            "trace_id": trace_id,
+                        }, ensure_ascii=False)
+                    }
+                # Fetch cached result data (placeholder: empty in this test endpoint)
+                result_data = {}
+                # Emit the end-of-stream event
+                yield {
+                    "event": "message_end",
+                    "data": json.dumps({
+                        "completed": True,
+                        "message": json.dumps(result_data, ensure_ascii=False),
+                        "code": 0,
+                        "trace_id": trace_id,
+                    }, ensure_ascii=False),
+                }
+            except Exception as e:
+                # Error handling: surface the failure as an SSE error event
+                yield {
+                    "event": "error",
+                    "data": json.dumps({
+                        "trace_id": trace_id,
+                        "message": str(e),
+                        "code": 1
+                    }, ensure_ascii=False)
+                }
+            finally:
+                # No cleanup needed: the client is a singleton
+                pass
+
+        # Return the SSE response
+        return EventSourceResponse(
+            event_generator(),
+            headers={
+                "Cache-Control": "no-cache",
+                "Connection": "keep-alive"
+            }
+        )
+
+    except Exception as err:
+        # Top-level error handling
+        handler_err(server_logger, trace_id=trace_id, err=err, err_name="agent/stream")
+        return JSONResponse(
+            return_json(code=1, msg=f"{err}", trace_id=trace_id),
+            status_code=500
+        )
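Putting the stream protocol together: every frame carries an event name (message, message_end, or error) plus a JSON data payload, so a client can dispatch on the event name. A consumer sketch with a hypothetical helper, assuming the requests package and the local test server:

    import json
    import requests

    def consume_agent_stream(payload: dict, url: str = "http://localhost:8001/test/agent/stream") -> None:
        event = None
        with requests.post(url, json=payload, stream=True) as resp:
            for line in resp.iter_lines(decode_unicode=True):
                if not line:
                    continue  # a blank line terminates one SSE frame
                if line.startswith("event:"):
                    event = line.split(":", 1)[1].strip()
                elif line.startswith("data:"):
                    data = json.loads(line.split(":", 1)[1].strip())
                    if event == "message":
                        print(data["output"], end="", flush=True)  # incremental chunk
                    elif event == "message_end":
                        print(f"\n[done] trace_id={data['trace_id']}")
                    elif event == "error":
                        raise RuntimeError(f"{data['code']}: {data['message']}")

    consume_agent_stream({"config": {"session_id": "111"}, "input": "你好"})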