Jelajahi Sumber

初始框架提交

lingmin_package@163.com 5 bulan lalu
induk
melakukan
551ffa2aed
54 mengubah file dengan 4664 tambahan dan 2 penghapusan
  1. 28 0
      Dockerfile
  2. 25 2
      README.md
  3. 11 0
      agent/__init__.py
  4. 288 0
      agent/agent_mcp.py
  5. 199 0
      agent/base_agent.py
  6. 84 0
      agent/cus_streamer.py
  7. 99 0
      agent/fixed_intent.py
  8. 9 0
      agent/generator_title.py
  9. 125 0
      agent/intent.py
  10. 10 0
      agent/session/__init__.py
  11. 344 0
      agent/session/session_manager.py
  12. 71 0
      base/async_redis_lock.py
  13. 35 0
      base/config.py
  14. 39 0
      base/redis_config.py
  15. 212 0
      base/redis_connection.py
  16. 67 0
      base/redis_lock.py
  17. TEMPAT SAMPAH
      config/.DS_Store
  18. 57 0
      config/config.ini
  19. 8 0
      config/mulian_servers_config.json
  20. 10 0
      config/prompt/common_model_query.yaml
  21. 9 0
      config/prompt/fixed_intent_prompt.yaml
  22. 20 0
      config/prompt/intent_prompt.yaml
  23. 19 0
      config/prompt/system_prompt.yaml
  24. 22 0
      config/servers_config.json
  25. 139 0
      core/__init__.py
  26. 155 0
      enums/common_enums.py
  27. 34 0
      function/knowledge_dify.py
  28. 91 0
      function/load_mcp_server.py
  29. 50 0
      function/local_function.py
  30. 9 0
      generate/__init__.py
  31. 178 0
      generate/model_generate.py
  32. 35 0
      gunicorn_config.py
  33. 145 0
      logger/loggering.py
  34. 179 0
      requirements.txt
  35. 64 0
      run.sh
  36. 97 0
      schemas/__init__.py
  37. 27 0
      schemas/cattle_farm.py
  38. 46 0
      server/app.py
  39. 10 0
      server/cus_middlewares.py
  40. 54 0
      test/test_redis.py
  41. 48 0
      test/test_redis2.py
  42. 88 0
      test/test_redis3.py
  43. 128 0
      test/test_utils.py
  44. 32 0
      test/test_yaml.py
  45. 76 0
      test/问题/流式推理打印推理过程.txt
  46. 76 0
      utils/common.py
  47. 63 0
      utils/redis_utils.py
  48. 114 0
      utils/request_tool.py
  49. 116 0
      utils/tool_utils.py
  50. 205 0
      utils/utils.py
  51. 166 0
      utils/yaml_utils.py
  52. 39 0
      views/__init__.py
  53. 338 0
      views/cattle_farm_views.py
  54. 71 0
      views/fixed_answer.py

+ 28 - 0
Dockerfile

@@ -0,0 +1,28 @@
FROM python:3.13-slim

ENV DEBIAN_FRONTEND=noninteractive \
    TZ=Asia/Shanghai

# Make /tmp writable and create the virtual environment.
# NOTE(review): chmod 777 is overly permissive — 1777 (sticky bit) is the
# conventional mode for /tmp; confirm why this is needed in the image at all.
RUN chmod 777 /tmp \
    && python -m venv /venv

ENV PATH="/venv/bin:$PATH"

# Copy only the requirements file first so the dependency layer is cached
# across rebuilds that touch application code only.
COPY requirements.txt /tmp/
RUN /venv/bin/pip config set global.index-url https://mirrors.aliyun.com/pypi/simple \
    && /venv/bin/pip config set install.trusted-host mirrors.aliyun.com \
    && /venv/bin/pip --default-timeout=1800 install -r /tmp/requirements.txt \
    && rm -rf /root/.cache

# Set the working directory and copy the project files.
WORKDIR /app
COPY . /app

EXPOSE 8001
# Make the startup script executable.
# NOTE(review): chmod 755 (or +x) would suffice; 777 also grants world write.
RUN chmod 777 run.sh

# Run the app with gunicorn from the virtual environment.
CMD ["/venv/bin/gunicorn", "-c", "gunicorn_config.py", "server.app:app"]

+ 25 - 2
README.md

@@ -1,3 +1,26 @@
-# LQAgentPlatform
+### LQAgentPlatform
 
- 测试修改
+ 
+
+ ### LQAgentPlatform 智能体应用服务
+ - 环境安装
+   - pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
+ - 后端启动服务
+    - cd LQAgentPlatform
+    - uvicorn server.app:app --port=8001 --host=0.0.0.0
+    - gunicorn -c gunicorn_config.py server.app:app       多进程启动
+
+
+    pip install aioredis -i https://mirrors.aliyun.com/pypi/simple/
+    pip install langgraph-checkpoint-postgres -i https://mirrors.aliyun.com/pypi/simple/
+    pip install langchain-redis -i https://mirrors.aliyun.com/pypi/simple/
+
+
+
+http://localhost:8001/queryex/stream
+{
+  "config": {
+      "sessionId":"111"
+  },
+  "input": "你好"
+}

+ 11 - 0
agent/__init__.py

@@ -0,0 +1,11 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@Project    : lq-agent-api
@File       : __init__.py
@IDE        : PyCharm
@Author     :
@Date       : 2025/7/14 15:04

Package marker for the ``agent`` package (no code yet).
'''

+ 288 - 0
agent/agent_mcp.py

@@ -0,0 +1,288 @@
+# !/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :agent_mcp.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/21 10:12
+'''
+import json
+import trace
+
+from langgraph.prebuilt import create_react_agent
+from sqlalchemy.sql.functions import user
+from logger.loggering import server_logger
+from utils.common import handler_err
+from utils.utils import get_models
+from utils.yaml_utils import system_prompt_config
+from views import mcp_server
+
+import threading
+import time
+from typing import Dict, List, Optional, AsyncGenerator, Any, OrderedDict
+from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
+from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+from langchain_core.runnables import RunnableConfig
+from agent.base_agent import BaseAgent
+from agent.session.session_manager import SessionManager
+from function.local_function import get_knowledge_answer
+from schemas.cattle_farm import FarmConfig
+
+
class XiwuzcAgentMcpClient(BaseAgent):
    """
    Xiwuzc intelligent assistant + MCP, with full session management, tuned
    for the cattle-farm scenario. A Redis-backed session-lock mechanism
    ensures only one client at a time may use a given session.
    """
    # Singleton instance and the lock guarding its lazy creation.
    _instance = None
    _singleton_lock = threading.Lock()

    def __new__(cls):
        """Thread-safe singleton (double-checked locking)."""
        if cls._instance is None:
            with cls._singleton_lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
                    cls._instance._initialize()
        return cls._instance

    def _initialize(self):
        """One-time setup of the models and session-related state."""
        llm, chat, embed = get_models()
        self.llm = llm
        self.chat = chat
        self.embed = embed
        self.agent_executor = None
        self.initialized = False
        self.psutil_available = True

        # Fixed system prompt loaded from the YAML prompt configuration.
        self.system_prompt = system_prompt_config["system_prompt"]

        # Handle for a background cleanup task (not started here).
        self.cleanup_task = None
        server_logger.info("mulian client initialized")

    async def init_agent(self):
        """Build the agent executor. Idempotent — only runs once."""
        if self.initialized:
            return

        server_logger.info(f"系统提示词 system_prompt:{self.system_prompt}")

        # Prompt template built around the fixed system prompt.
        prompt = ChatPromptTemplate.from_messages([
            ("system", self.system_prompt),
            MessagesPlaceholder(variable_name="messages"),
            ("placeholder", "{agent_scratchpad}")
        ])

        # Create the ReAct agent — no MemorySaver; conversation history is
        # managed externally via SessionManager.
        self.agent_executor = create_react_agent(
            self.llm,
            tools=[] ,  # scenario tool set + private knowledge-base retrieval tool (currently empty)
            prompt=prompt
        )
        self.initialized = True
        server_logger.info("mulian agent initialized")


    async def handle_query(self, trace_id: str, business_scene: str, task_prompt_info: dict, input_query, context=None,
                           supplement_info=None, header_info=None , config_param: FarmConfig = None):
        """
        Handle one non-streaming query under an exclusive session lock.

        Returns the cleaned model answer, or a Chinese error string when the
        session is locked elsewhere or an exception occurs.

        NOTE(review): if the agent emits no AIMessage, ``full_response`` stays
        empty and this method implicitly returns ``None`` — confirm callers
        handle that case.
        """
        try:
            # Lazily initialize the agent on first use.
            if not self.initialized:
                await self.init_agent()

            session_id = config_param.sessionId
            user_role = config_param.userRole
            # Session manager scoped to this trace/session.
            session_manager = SessionManager(lock_key_prefix="Chat_", trace_id=trace_id , session_id=session_id)
            # Try to acquire the session lock (5-second timeout).
            if not await session_manager.acquire_session_lock():
                return "错误:此会话已在其他设备登录,请使用新会话或等待解锁"

            try:
                # Build the model input plus the shorter summary variant
                # that is stored in the conversation history.
                input_message , input_summary_context = self.get_input_context(
                    trace_id=trace_id,
                    business_scene=business_scene,
                    task_prompt_info=task_prompt_info,
                    input_query=input_query,
                    context=context,
                    supplement_info=supplement_info,
                    header_info=header_info,
                    config_param={
                        "session_id": session_id,
                        "user_role": user_role
                    }
                )
                # Message sent to the model.
                input_human_message = HumanMessage(content=input_message)
                # Message stored in the conversation-history summary.
                input_human_summary_message = HumanMessage(content=input_summary_context)
                # Previously saved history for this session.
                history_messages = await session_manager.get_memory_history()
                # Full message list: saved history + new user message.
                all_messages = list(history_messages) + [input_human_message]

                # Execution context: thread_id ties the run to this session.
                config = RunnableConfig(
                    configurable={"thread_id": session_id},
                    runnable_kwargs={"recursion_limit": 15}
                )

                # Run the agent; events are consumed (and logged) below.
                events = self.agent_executor.astream(
                    {"messages": all_messages},
                    config=config,
                    stream_mode="values"
                )

                # Collect AI message contents while logging every step.
                full_response = []
                async for event in events:
                    if isinstance(event["messages"][-1], AIMessage):
                        chunk = event["messages"][-1].content
                        full_response.append(chunk)
                    log_content = self.get_pretty_message_str(event["messages"][-1])
                    server_logger.info("\n" + log_content.strip(), trace_id=trace_id)

                if full_response:
                    full_text = "".join(full_response)
                    server_logger.info(trace_id=trace_id, msg=f"full_response: {full_text}")
                    # Persist the updated conversation history.
                    await session_manager.save_update_memory_history(history_messages , input_human_summary_message, AIMessage(content=full_text))
                    full_text = self.clean_json_output(full_text)
                    return full_text
            finally:
                # Always release the session lock.
                await session_manager.release_session_lock()
        except PermissionError as e:
            # Session locked by another device.
            return str(e)
        except Exception as e:
            handler_err(server_logger, trace_id=trace_id, err=e, err_name='query')
            return f"系统错误: {str(e)}"


    async def handle_query_stream(
            self,
            trace_id: str,
            business_scene: str,
            task_prompt_info: dict,
            input_query: str,
            context: Optional[str] = None,
            supplement_info: Optional[str] = None,
            header_info: Optional[Dict] = None,
            config_param: FarmConfig = None,
    ) -> AsyncGenerator[str, None]:
        """
        Streaming variant of :meth:`handle_query` with buffered flushing.

        Yields text chunks; buffered content is flushed when 3+ chunks have
        accumulated, 0.5s has elapsed, or the last chunk ends at a natural
        sentence break. Errors are yielded as JSON ``{"error": ...}`` strings.
        """
        try:
            # Lazily initialize the agent on first use.
            if not self.initialized:
                await self.init_agent()

            session_id = config_param.sessionId
            user_role = config_param.userRole

            # Session manager scoped to this trace/session.
            session_manager = SessionManager(lock_key_prefix="ChatStream_", trace_id=trace_id , session_id=session_id)
            # Try to acquire the session lock (5-second timeout).
            if not await session_manager.acquire_session_lock():
                yield json.dumps({"error": "此会话已在其他设备登录"})
                return

            try:
                # Build the model input plus the summary variant for history.
                input_message , input_summary_context = self.get_input_context(
                    trace_id=trace_id,
                    business_scene=business_scene,
                    task_prompt_info=task_prompt_info,
                    input_query=input_query,
                    context=context,
                    supplement_info=supplement_info,
                    header_info=header_info,
                     config_param={
                        "session_id": session_id,
                        "user_role": user_role
                    }
                )
                server_logger.info(trace_id=trace_id, msg=f"input_context: {input_message}")
                # Message sent to the model.
                input_human_message = HumanMessage(content=input_message)
                # Message stored in the conversation-history summary.
                input_human_summary_message = HumanMessage(content=input_summary_context)
                # Previously saved history for this session.
                history_messages = await session_manager.get_memory_history()
                # Full message list: saved history + new user message.
                all_messages = list(history_messages) + [input_human_message]
                # Execution context: thread_id ties the run to this session.
                config = RunnableConfig(
                    configurable={"thread_id": session_id},
                    runnable_kwargs={"recursion_limit": 15}
                )

                # Stream execution as fine-grained events.
                events = self.agent_executor.astream_events(
                    {"messages": all_messages},
                    config=config,
                    stream_mode="values"
                )

                full_response = []
                buffer = []
                last_flush_time = time.time()

                # Process the event stream.
                async for event in events:
                    # Log only selected event types (see BaseAgent helper).
                    self.log_stream_pretty_message(trace_id=trace_id, event=event)

                    if 'chunk' in event['data'] and "on_chat_model_stream" in event['event']:
                        chunk = event['data']['chunk'].content
                        full_response.append(chunk)

                        # Buffering strategy: accumulate, then flush.
                        buffer.append(chunk)
                        current_time = time.time()

                        # Flush when any of these holds: enough chunks,
                        # timeout elapsed, or a natural sentence break.
                        if (len(buffer) >= 3 or  # minimum chunk count reached
                                (current_time - last_flush_time) > 0.5 or  # timeout
                                any(chunk.endswith((c, f"{c} ")) for c in
                                    ['.', '。', '!', '?', '\n', ';', ';'])):  # natural break

                            # Merge and emit the buffered content.
                            combined = ''.join(buffer)
                            yield combined

                            # Reset the buffer.
                            buffer.clear()
                            last_flush_time = current_time

                # Emit whatever is left in the buffer.
                if buffer:
                    yield ''.join(buffer)

                # Persist the full response into the session history.
                if full_response:
                    full_text = "".join(full_response)
                    server_logger.info(trace_id=trace_id, msg=f"full_response: {full_text}")
                    # Save/compact the updated conversation history.
                    await session_manager.save_update_memory_history(history_messages , input_human_summary_message, AIMessage(content=full_text))
            finally:
                # Always release the session lock.
                await session_manager.release_session_lock()

        except PermissionError as e:
            yield json.dumps({"error": str(e)})
        except Exception as e:
            handler_err(server_logger, trace_id=trace_id, err=e, err_name='query_stream')
            yield json.dumps({"error": f"系统错误: {str(e)}"})
+
+
# Module-level shared client; __new__ enforces a single instance process-wide.
client = XiwuzcAgentMcpClient()

+ 199 - 0
agent/base_agent.py

@@ -0,0 +1,199 @@
+
+# !/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :base_agent.py
+@IDE        :Cursor
+@Author     : 
+@Date       :2025/7/26 05:00
+'''
+from datetime import datetime
+from io import StringIO
+from contextlib import redirect_stdout
+from typing import Dict, List, Optional, AsyncGenerator, Any, OrderedDict
+from logger.loggering import server_logger
+from utils.redis_utils import get_redis_result_cache_data_and_delete_key
+from enums.common_enums import UserRoleEnum
+
class BaseAgent:
    """
    Base class for the intelligent-assistant agents.

    Shared helpers: capturing ``pretty_print()`` output for logging, building
    the model/summary input contexts, stripping Markdown JSON fences from
    model output, and collecting cached results from Redis.
    """

    def __init__(self):
        pass

    def get_pretty_message_str(self, message) -> str:
        """Safely capture the output of ``message.pretty_print()`` as a string.

        ``pretty_print()`` writes to stdout, so stdout is redirected into a
        ``StringIO`` buffer and its contents returned.
        """
        captured_output = StringIO()
        with redirect_stdout(captured_output):
            message.pretty_print()
        return captured_output.getvalue()

    def log_stream_pretty_message(self, trace_id, event):
        """
        Log one streamed agent event (part of the reasoning trace) using the
        messages' ``pretty_print()`` output.

        Only chain/tool start-end and chat-model start events are logged;
        all other event types are skipped to keep the log readable.
        """
        event_type = event.get('event', '')
        name = event.get('name', '')
        data = event.get('data', {})
        if event_type not in ['on_chain_start', 'on_chain_end', 'on_tool_start', 'on_tool_end', 'on_chat_model_start']:
            return

        server_logger.info(trace_id=trace_id , msg=f"\n================================= {event_type} ({name}) =================================")
        if 'messages' in event:
            for msg in event['messages']:
                output = self.get_pretty_message_str(msg)
                server_logger.info(trace_id=trace_id , msg=f"\n{output}")
        elif 'chunk' in data:
            chunk = data['chunk']
            if hasattr(chunk, 'content') and chunk.content:
                server_logger.info(trace_id=trace_id , msg=f"Content: {chunk.content}")
            if hasattr(chunk, 'tool_calls') and chunk.tool_calls:
                server_logger.info(trace_id=trace_id , msg=f"Tool calls: {chunk.tool_calls}")
        elif 'output' in data:
            output = data['output']
            if hasattr(output, 'pretty_print'):
                output = self.get_pretty_message_str(output)
                server_logger.info(trace_id=trace_id , msg=f"\n{output}")
            else:
                server_logger.info(trace_id=trace_id , msg=f"Output: {output}")

    def get_input_context(
            self,
            trace_id: str,
            business_scene: str,
            task_prompt_info: dict,
            input_query: str,
            context: Optional[str] = None,
            supplement_info: Optional[str] = None,
            header_info: Optional[Dict] = None,
            config_param: Optional[dict] = None
    ) -> tuple[str, str]:
        """
        Build the scenario-optimized context pair.

        Returns ``(input_context, input_summary_context)``: the first is the
        full prompt text sent to the model, the second a shorter variant
        stored in the conversation-history summary.

        Dead ``call_tools_return_data_type``/``final_result_data_type``
        assignments from the original (never referenced by either template)
        have been removed; output text is unchanged.
        """
        context = context or "无相关数据"
        supplement_info = supplement_info or "无补充信息"
        token = header_info.get('token', '') if header_info else ''
        tenantId = header_info.get('tenantId', '') if header_info else ''
        user_role = config_param.get('user_role', UserRoleEnum.COMMON.code) if config_param else UserRoleEnum.COMMON.code
        task_prompt_info_str = task_prompt_info["task_prompt"]

        # Scenario-optimized context template — whitespace is intentional,
        # the model receives this text verbatim.
        context_template = """
        助手会话 [ID: {trace_id}] 
        时间: {timestamp}
        任务: {task_prompt_info_str}
        

        用户提供上下文信息:
        {context}

        用户补充信息:
        {supplement_info}

        用户输入问题:
        {input}
        
        用户角色: {user_role}
        安全验证: {token}
        场ID: {tenantId}
        """

        input_context = context_template.format(
            trace_id=trace_id,
            task_prompt_info_str=task_prompt_info_str,
            context=context,
            input=input_query,
            supplement_info=supplement_info,
            user_role=user_role,
            token=token,
            tenantId=tenantId,
            timestamp=datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        )

        # Shorter variant used for the conversation-history summary.
        summary_context_template = """
        助手会话 [ID: {trace_id}] 
        用户意图场景: {business_scene}
        上下文信息:
        {context}
        补充信息:
        {supplement_info}
        用户问题:
        {input}
        用户角色: {user_role}
        安全验证: {token}
        场ID: {tenantId}
        """

        input_summary_context = summary_context_template.format(
            trace_id=trace_id,
            business_scene=business_scene,
            context=context,
            input=input_query,
            supplement_info=supplement_info,
            user_role=user_role,
            token=token,
            tenantId=tenantId
        )
        return input_context, input_summary_context

    def clean_json_output(self, raw_output: str) -> str:
        """Strip a leading ```` ```json ```` fence and trailing ```` ``` ```` fence.

        Returns the trimmed payload; text without fences passes through
        unchanged (apart from surrounding whitespace).
        """
        cleaned = raw_output.strip()
        cleaned = cleaned.removeprefix("```json")
        cleaned = cleaned.removesuffix("```")
        return cleaned.strip()

    async def get_redis_result_cache_data(self, trace_id: str):
        """
        Fetch (and delete) all cached result entries for this trace from Redis.

        Data types collected:
            cattle_info          — basic information
            cattle_temperature   — body-temperature data
            cattle_walk          — step-count data
            retriever_resources  — knowledge-base retrieval provenance

        @param trace_id: trace/correlation ID used as the cache key component
        @return: dict mapping each data type to its cached value
        """
        data_types = ("cattle_info", "cattle_temperature", "cattle_walk", "retriever_resources")
        return {
            data_type: await get_redis_result_cache_data_and_delete_key(
                data_type=data_type, trace_id=trace_id)
            for data_type in data_types
        }
+
+
+

+ 84 - 0
agent/cus_streamer.py

@@ -0,0 +1,84 @@
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :cus_streamer.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/14 12:04
+'''
+from langchain_core.messages import HumanMessage
+from typing import AsyncGenerator
+
+import asyncio
+
+
class AdaptiveStreamer:
    """
    Streams model output in adaptively-sized chunks: the chunk size grows on
    fast links and shrinks on slow ones, based on the measured send latency.
    """

    def __init__(self, min_chunk: int = 256, max_chunk: int = 4096, initial_chunk: int = 4):
        """
        :param min_chunk: lower bound for the adaptive chunk size
        :param max_chunk: upper bound for the adaptive chunk size
        :param initial_chunk: starting chunk size before any adaptation
        """
        self.min_chunk = min_chunk
        self.max_chunk = max_chunk
        self.chunk_size = initial_chunk
        self.buffer = ""
        self.last_latency = 0.0

    async def astream(self, model, prompt: str, config, stream_mode="values") -> AsyncGenerator[str, None]:
        """
        Adaptive streaming of a model response.

        Yields text chunks (``str``). On a per-chunk failure an error-marker
        string is yielded and processing continues with later chunks.

        BUGFIX: error chunks were previously yielded as ``bytes``
        (``.encode('utf-8')``) while normal chunks are ``str`` — and the
        return annotation claimed ``bytes``. Everything is now consistently
        ``str`` so consumers can join the stream without type checks.
        """
        async for langchain_chunk in model.astream({"messages": [HumanMessage(content=prompt)]}, config=config, stream_mode=stream_mode):
            last_message = langchain_chunk['messages'][-1]
            # Skip chunks without content and echoed human messages.
            if not hasattr(last_message, 'content') or not last_message.content:
                continue
            if isinstance(last_message, HumanMessage):
                continue

            try:
                # Append to the buffer, then drain it chunk_size at a time.
                self.buffer += last_message.content
                while len(self.buffer) >= self.chunk_size:
                    output_chunk = self.buffer[:self.chunk_size]
                    self.buffer = self.buffer[self.chunk_size:]

                    # Time the send so the chunk size can adapt to link speed.
                    start_time = asyncio.get_event_loop().time()
                    yield output_chunk
                    send_duration = asyncio.get_event_loop().time() - start_time

                    self.adjust_chunk_size(send_duration)

            except Exception as e:
                # Surface the error in-stream and keep processing later chunks.
                yield f"[错误] {str(e)}"
                continue

        # Flush whatever is left in the buffer.
        if self.buffer:
            yield self.buffer
            self.buffer = ""

    def adjust_chunk_size(self, send_duration: float):
        """
        Adapt the chunk size from the last observed send duration.

        Slow links (< 10 KB/s) shrink the chunk by 20 %, fast links
        (> 100 KB/s) grow it by 20 %; the result is smoothed 70/30 against
        the current size and clamped to [min_chunk, max_chunk].
        """
        # Effective send rate in bytes/second (infinite for zero duration).
        if send_duration > 0:
            send_rate = self.chunk_size / send_duration
        else:
            send_rate = float('inf')

        # Pick a target size based on the rate band.
        if send_rate < 10000:  # slow network (< 10 KB/s)
            new_size = max(self.min_chunk, int(self.chunk_size * 0.8))
        elif send_rate > 100000:  # fast network (> 100 KB/s)
            new_size = min(self.max_chunk, int(self.chunk_size * 1.2))
        else:
            new_size = self.chunk_size

        # Smooth the transition to avoid oscillation.
        self.chunk_size = int(0.7 * self.chunk_size + 0.3 * new_size)

+ 99 - 0
agent/fixed_intent.py

@@ -0,0 +1,99 @@
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :intent.py
+@IDE        :PyCharm
+@Author     : 
+@Date       :2025/7/14 12:04
+'''
+
+
+import os
+import sys
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+from logger.loggering import server_logger
+from utils.utils import get_models
+from langchain_core.prompts import SystemMessagePromptTemplate
+from langchain_core.prompts import HumanMessagePromptTemplate
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.prompts import FewShotChatMessagePromptTemplate
+from utils.yaml_utils import fixed_question_intent_config
+
+
+
class FixedIntentIdentifyClient:
    """Intent recognizer for fixed (canned) questions, driven by a few-shot
    chat prompt built from the ``fixed_question_intent_config`` YAML."""

    def __init__(self):
        """Pick the deployed chat model and load the fixed-intent prompt config."""
        _, chat_model, _ = get_models()
        self.llm_recognition = chat_model
        # Fixed-question intent configuration (system prompt + examples).
        self.intent_prompt = fixed_question_intent_config

    def recognize_intent(self, input):
        """
        Classify the user's question against the fixed question/answer set.

        Input:  the user's question text.
        Output: the recognized intent — the model's ``.content`` when
                available, otherwise the raw result object.
        """
        # Shape of each few-shot example: one human turn, one AI turn.
        shot_template = ChatPromptTemplate.from_messages(
            [("human", "{inn}"), ("ai", "{out}")]
        )
        few_shots = FewShotChatMessagePromptTemplate(
            example_prompt=shot_template,
            examples=self.intent_prompt["fixed_problem_answer"],
        )

        # System prompt, then the examples, then the actual question.
        pipeline = ChatPromptTemplate.from_messages(
            [
                ('system', self.intent_prompt["system_prompt"]),
                few_shots,
                ('human', '{input}'),
            ]
        ) | self.llm_recognition

        outcome = pipeline.invoke(input=input)
        # Tolerate both message-like results (with .content) and raw values.
        return outcome.content if hasattr(outcome, 'content') else outcome
+
+
+
+
+
# Module-level shared instance used by views/handlers.
fixed_intent_identify_client = FixedIntentIdentifyClient()
+
+
if __name__ == '__main__':
    # Ad-hoc smoke test: run one sample question through the recognizer.
    # FIX: the original repeatedly reassigned (and shadowed) the builtin
    # ``input`` — only the last assignment ever took effect; samples are
    # kept here as documentation instead.
    #   "你好"
    #   "我要查询信息"                   # result=cattle_farm_query
    #   "查询最近步数情况"               # result=cattle_farm_query
    #   "这套系统给我带来了什么好处"     # result=cattle_farm_warning_plan
    #   "查询16号的信息,返回json" / "查询16号的信息,返回markdown"
    #   "编号为0014的数据,返回JSON格式数据" / "查询0013的信息" / "查询0013的数据"
    question = "建设数字化后给我带来了多少效益"  # result=cattle_farm_warning_task_execute
    result = fixed_intent_identify_client.recognize_intent(question)
    server_logger.info(f"input={question} ,result={result}")

+ 9 - 0
agent/generator_title.py

@@ -0,0 +1,9 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@Project    : lq-agent-api
@File       : generator_title.py
@IDE        : PyCharm
@Author     :
@Date       : 2025/7/22 09:33

Placeholder module for conversation-title generation (no code yet).
'''

+ 125 - 0
agent/intent.py

@@ -0,0 +1,125 @@
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :intent.py
+@IDE        :PyCharm
+@Author     : 
+@Date       :2025/7/14 12:04
+'''
+
+
+import os
+import sys
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+from logger.loggering import server_logger
+from utils.utils import get_models
+from langchain_core.prompts import SystemMessagePromptTemplate
+from langchain_core.prompts import HumanMessagePromptTemplate
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.prompts import FewShotChatMessagePromptTemplate
+from utils import yaml_utils
+from agent.session.session_manager import SessionContextMemoryManager
+from base.config import config_handler
+
+
class IntentIdentifyClient:
    """History-aware intent recognizer built on few-shot chat prompting."""

    def __init__(self):
        """
        Create the intent-recognition client: pick the deployed chat model
        and load the intent-recognition prompt configuration.
        """
        # Fetch the deployed model handles (llm / chat / embedding).
        llm, chat, embed = get_models()
        self.llm_recognition = chat
        # Intent-recognition prompt configuration (system prompt + examples).
        self.intent_prompt = yaml_utils.get_intent_prompt()

    async def recognize_intent(self , trace_id: str , config: dict , input: str):
        """
        Recognize the user's intent, optionally enriching the prompt with the
        most recent session history (behind a config flag).

        NOTE(review): ``config`` is annotated ``dict`` but is accessed via
        attribute (``config.sessionId``) — it looks like a FarmConfig-style
        object; confirm against callers and fix the annotation.

        Input:  the user's question text.
        Output: the recognized intent, one of:
        - cattle_farm_common
        - cattle_farm_query
        - cattle_farm_warning_plan
        """
        session_id = config.sessionId
        # Feature flag: include the latest history in intent recognition.
        use_history_recognize_intent = config_handler.get("lru", "USE_RECOGNIZE_INTENT_HISTORY_MESSAGES" , "False")
        server_logger.info(f"[使用用户最新历史记录作为意图识别]use_history_recognize_intent: {use_history_recognize_intent}")
        history = "无"
        if use_history_recognize_intent == "True":
            # Session context manager for this trace/session.
            session_context_memory_manager = SessionContextMemoryManager(trace_id , session_id)
            # Latest in-memory history entries, serialized to a string.
            history = await session_context_memory_manager.get_memory_last_history_str()
        # Recognize the intent from history plus the user's question.
        return self.recognize_intent_history(input=input , history=history)


    def recognize_intent_history(self , input: str , history="无"):
        """
        Recognize intent from the user's question and a history string.

        Input:  the user's question text (plus optional serialized history;
                "无" means "no history").
        Output: the recognized intent, one of:
        - cattle_farm_common
        - cattle_farm_query
        - cattle_farm_warning_plan
        """
        # Few-shot examples from the prompt configuration.
        examples = self.intent_prompt["intent_examples"]
        system_prompt = self.intent_prompt["system_prompt"]
        # Inject the history into the system prompt.
        system_prompt = system_prompt.format(history=history)
        server_logger.info(f"增加用户历史记录,用于意图识别,prompt配置.system_prompt: {system_prompt}")

        # Shape of each few-shot example: one human turn, one AI turn.
        examples_prompt = ChatPromptTemplate.from_messages(
            [
                ("human", "{inn}"),
                ("ai", "{out}"),
            ]
        )
        few_shot_prompt = FewShotChatMessagePromptTemplate(example_prompt=examples_prompt,
                                                           examples=examples)
        final_prompt = ChatPromptTemplate.from_messages(
            [
                ('system', system_prompt),
                few_shot_prompt,
                ('human', '{input}'),
            ]
        )

        chain = final_prompt | self.llm_recognition
        server_logger.info(f"意图识别输入input: {input}")
        result = chain.invoke(input={"input": input})
        # Tolerate both message-like results (with .content) and raw values.
        if hasattr(result, 'content'):
            # Message-like result: return its text content.
            return result.content
        else:
            # Otherwise return the raw result.
            return result
+
+
+
+
+
# Module-level shared instance used by views/handlers.
intent_identify_client = IntentIdentifyClient()
+
+
if __name__ == '__main__':
    # Ad-hoc smoke test for history-aware intent recognition.
    # Alternative sample inputs from earlier revisions:
    #   "你好"
    #   "我要查询信息"        # result=cattle_farm_query
    #   "查询最近步数情况"    # result=cattle_farm_query
    #   "01、10" / "继续"
    #   "查询12步数" / "分析当前环境数据"
    # BUGFIX: ``history`` was referenced without ever being defined, so this
    # block raised NameError; pass the method's "no history" sentinel
    # explicitly. Also avoid shadowing the builtin ``input``.
    question = "当前没有"
    result = intent_identify_client.recognize_intent_history(history="无", input=question)
    server_logger.info(f"result={result}")

+ 10 - 0
agent/session/__init__.py

@@ -0,0 +1,10 @@
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :__init__.py
+@IDE        :Cursor
+@Author     : 
+@Date       :2025/7/30
+'''
+

+ 344 - 0
agent/session/session_manager.py

@@ -0,0 +1,344 @@
+# !/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :session_manager.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/24 03:03
+'''
+
+import asyncio
+import logging
+import re
+import time
+from typing import Any, Dict, List, Tuple
+# 假设的导入(根据实际框架调整)
+from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
+from base.config import config_handler
+from base.redis_config import load_config_from_env
+from base.redis_connection import RedisConnectionFactory
+from base.async_redis_lock import AsyncRedisLock
+
+from langchain.memory import ConversationBufferMemory
+from langchain_community.chat_message_histories import RedisChatMessageHistory
+from langchain_core.runnables.history import RunnableWithMessageHistory
+from langchain_core.messages import get_buffer_string
+from langchain_core.messages import messages_to_dict, messages_from_dict
+from langchain.prompts import PromptTemplate
+from utils.utils import get_models
+import warnings
+from langchain_core._api.deprecation import LangChainDeprecationWarning
+from logger.loggering import server_logger
+from utils.yaml_utils import system_prompt_config
+
+from langchain_core.messages import BaseMessage, HumanMessage, AIMessage, SystemMessage, ToolMessage, FunctionMessage
+
+
+
class SessionManager:
    """Central manager for per-session state and distributed locking.

    Coordinates a Redis-backed session lock (so only one device drives a
    session at a time) and delegates chat-history persistence to a
    SessionContextMemoryManager.
    """

    def __init__(self, trace_id: str, lock_key_prefix: str, session_id: str, client_id: str = "default"):
        self.trace_id = trace_id
        self.session_id = session_id
        self.client_id = client_id
        self.session_lock = None
        # Bug fix: lazily initialized here so is_session_locked() can no
        # longer hit an AttributeError when called before acquire_session_lock().
        self.redis_client = None
        self.session_lock_key = lock_key_prefix + session_id
        # Reference to the background save task so it is not garbage-collected.
        self._save_task = None
        # Conversation context manager (Redis-backed history).
        self.session_context_memory_manager = SessionContextMemoryManager(trace_id, session_id)

    async def _ensure_redis_client(self):
        """Create the Redis connection on first use (factory caches per target)."""
        if self.redis_client is None:
            self.redis_client = await RedisConnectionFactory.get_connection()
        return self.redis_client

    async def is_session_locked(self) -> bool:
        """Return True when another device currently holds this session's lock."""
        redis_client = await self._ensure_redis_client()
        return bool(await redis_client.exists(self.session_lock_key))

    async def acquire_session_lock(self, timeout: float = 5) -> bool:
        """Try to acquire the session lock within ``timeout`` seconds.

        Locking can be disabled globally via [chat] CHAT_SESSION_LOCK=False,
        in which case this always succeeds immediately.
        """
        config_is_lock = config_handler.get("chat", "CHAT_SESSION_LOCK", "True")
        server_logger.info(trace_id=self.trace_id, msg=f"创建新会话: {self.session_lock_key},config_is_lock:{config_is_lock} (锁定设备: {self.client_id})")
        if config_is_lock == "False":
            return True

        try:
            # Obtain the Redis connection through the factory (cached singleton).
            self.redis_client = await self._ensure_redis_client()
            self.session_lock = AsyncRedisLock(self.redis_client, self.session_lock_key)
            server_logger.info(trace_id=self.trace_id, msg=f"创建新会话: {self.session_lock_key} (锁定设备: {self.client_id})")
            flag = await self.session_lock.acquire(timeout)
            server_logger.debug(trace_id=self.trace_id, msg=f"尝试获取锁:{self.session_lock_key}-{flag}")
            return flag
        except asyncio.TimeoutError:
            server_logger.warning(trace_id=self.trace_id, msg=f"获取会话锁超时: {self.session_lock_key}")
            return False
        except Exception as e:
            server_logger.error(trace_id=self.trace_id, msg=f"获取会话锁失败: {self.session_lock_key}, 错误: {e}")
            return False

    async def release_session_lock(self):
        """Release the session lock (no-op when locking is disabled)."""
        config_is_lock = config_handler.get("chat", "CHAT_SESSION_LOCK", "True")
        server_logger.info(trace_id=self.trace_id, msg=f"释放新会话: {self.session_lock_key},config_is_lock:{config_is_lock} (锁定设备: {self.client_id})")
        if config_is_lock == "False":
            return
        try:
            if self.session_lock:
                await self.session_lock.release()
        except Exception as e:
            server_logger.error(trace_id=self.trace_id, msg=f"释放会话锁失败: {self.session_lock_key}, 错误: {e}")

    async def get_memory_history(self):
        """Return the raw chat history for this session."""
        return await self.session_context_memory_manager.get_memory_history()

    async def save_update_memory_history(self, history_messages, input_message, output_message):
        """Persist the latest turn without blocking the caller.

        The save/summarize work runs as a background task. Bug fix: a
        reference to the task is kept on self so it cannot be
        garbage-collected before it finishes.
        """
        self._save_task = asyncio.create_task(
            self.session_context_memory_manager.save_update_memory_history(history_messages, input_message, output_message)
        )
        server_logger.info(trace_id=self.trace_id, msg=f"{self.session_id}: 保存并更新历史会话任务已创建,主协程继续执行结束")
+
+
+
+
+
class SessionContextMemoryManager:
    """
        Redis-backed conversation context manager for one session.

        Wraps a LangChain ConversationBufferMemory stored in Redis, provides
        truncated history for intent recognition, and summarizes the whole
        history via the LLM once it grows past a configured size.
    """

    def __init__(self , trace_id: str, session_id: str):
        self.trace_id = trace_id
        self.session_id = session_id
        self.redis_memory = None
        # Max serialized history length; beyond this the history is summarized.
        self.max_length = int(config_handler.get("lru", "AGENT_MAX_HISTORY_TOKENS"))
        # Max number of history messages usable for intent recognition.
        self.recognize_intent_max_history = int(config_handler.get("lru", "AGENT_RECOGNIZE_INTENT_MAX_HISTORY_MESSAGES"))
        llm, chat, embed = get_models()
        self.llm = llm
        # Fixed system prompt used for summarization.
        self.system_prompt = system_prompt_config["summary_system_prompt"]
        # Initialize the Redis-backed chat history.
        self.init_redis_chat_history_memory()
    

    def init_redis_chat_history_memory(self):
        """
            Build (and store on self) the Redis-backed chat memory for this
            session. Returns the ConversationBufferMemory instance.
        """
        # Suppress LangChain deprecation warnings only within this block.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=LangChainDeprecationWarning)
            redis_config = load_config_from_env()
            #server_logger.info(trace_id =self.trace_id, msg=f"redis_config={redis_config}")
            # Persist the conversation history with RedisChatMessageHistory.
            chat_history = RedisChatMessageHistory(
                session_id=self.session_id,  # unique session identifier
                url=redis_config.url  # or pass a redis_client directly
            )
            # Keep the memory in Redis.
            self.redis_memory = ConversationBufferMemory(
                memory_key="chat_history",
                return_messages=True,
                chat_memory=chat_history  # or any compatible store
            )

        server_logger.info(trace_id=self.trace_id, msg=f"redis 内存上下文历史初始完成={self.redis_memory}")
        return self.redis_memory


    async def get_memory_history(self):
        """
            Return the raw history messages for this session.
        """
        history_messages = self.load_memory_history()
        server_logger.debug(trace_id=self.trace_id, msg=f"begin session_id:{self.session_id}, session.history.len: {len(history_messages)}, session.history: {history_messages}")
        return history_messages


    async def get_memory_last_history_str(self):
        """
            Return the most recent history messages serialized to a string
            (at most ``recognize_intent_max_history`` messages), or "无"
            ("none") when the history is empty.

            get_buffer_string example:
                messages = [
                    HumanMessage(content="hi!"),
                    AIMessage(content="I am an AI assistant.")
                ]
                formatted_str = get_buffer_string(
                    messages,
                    human_prefix="User",      # prefix for human messages (default "Human")
                    ai_prefix="Assistant",    # prefix for AI messages (default "AI")
                    separator="\n"            # message separator (default "\n")
                )
                yields:
                    User: hi!
                    Assistant: I am an AI assistant.

        """
        history_messages = self.load_memory_history()
        if history_messages is None or len(history_messages) == 0:
            return "无"
        truncated_last_history_messages = history_messages
        if len(history_messages) > self.recognize_intent_max_history:
             # Keep only the newest N messages — negative slice from the end.
             truncated_last_history_messages = history_messages[-self.recognize_intent_max_history:]
        # Markdown simplification is currently disabled (see _simplify_messages_content).
        #truncated_last_history_messages = self._simplify_messages_content(truncated_last_history_messages)
        # Serialize the history to a string.
        history_messages_str = get_buffer_string(truncated_last_history_messages)
        # Escape braces so the string is safe inside prompt templates.
        history_messages_str = history_messages_str.replace('{', '{{').replace('}', '}}')
        server_logger.info(trace_id=self.trace_id, msg=f"recognize_intent_history session_id:{self.session_id}, session.history.len: {len(history_messages)}, truncated.last.history.len: {len(truncated_last_history_messages)}, history_messages_str: {history_messages_str}")
        return history_messages_str


    def load_memory_history(self):
        """
            Load the stored conversation history from Redis memory.
        """
        return self.redis_memory.load_memory_variables({})["chat_history"]
        

    def save_memory_history(self, input  , output):
        """
            Persist one input/output turn to the conversation history.
        """
        self.redis_memory.save_context({"input": input}, {"output": output})

    
    async def save_update_memory_history(self , history_messages , input_message  , output_message):
        """
            If the combined history exceeds the maximum length, clear it and
            store an LLM-generated summary instead; otherwise append the new
            turn to the stored history.

            Args:
                history_messages: existing conversation history
                input_message: the new input message
                output_message: the new output message
        """
        cur_messages = [input_message] + [output_message]
        tmp_messages = list(history_messages) + cur_messages
        history_messages , is_summary = await self.compress_chat_history(tmp_messages)
        server_logger.info(trace_id =self.trace_id, msg=f"保存更新历史记录处理:session_id={self.session_id},is_summary={is_summary}")
        
        if is_summary:
            # Summarized: replace the stored history with the summary message.
            if isinstance(history_messages[0], SystemMessage):
                self.clear_save_summary_memory(history_messages[0].content)
            server_logger.debug(trace_id=self.trace_id, msg=f"end session_id:{self.session_id}, session.history.len: {len(history_messages)}, session.history: {history_messages}")
        else:
            server_logger.debug(trace_id=self.trace_id, msg=f"end session_id:{self.session_id}, session.history.len: {len(tmp_messages)}, session.history: {tmp_messages}")
            self.redis_memory.save_context({"input": input_message.content}, {"output": output_message.content})


    
    def clear_save_summary_memory(self , summary_text):
        """
            Clear the raw history and store the summary in its place.
        """
        try:
            server_logger.info(trace_id =self.trace_id, msg=f"type(summary_text): {type(summary_text)}")
            server_logger.info(trace_id =self.trace_id, msg=f"summary_text: {summary_text}")
            if not isinstance(summary_text, str):
                # Safety net: try to extract .content from message-like objects.
                try:
                    summary_text = str(summary_text.content)
                except AttributeError:
                    summary_text = repr(summary_text)  # last resort
            self.redis_memory.clear()
            server_logger.info(trace_id =self.trace_id, msg=f"session_id={self.session_id}先清除历史会话记录完成")
            self.redis_memory.save_context({"input": "整理后对话摘要"}, {"output": summary_text})
            server_logger.info(trace_id =self.trace_id, msg=f"session_id={self.session_id}再保存摘要历史记录完成")
        except Exception as e:
            server_logger.error(trace_id =self.trace_id, msg=f"clear_save_summary_memory error: {e}")
        

    async def compress_chat_history(self , chat_history):
        """
            Compress the chat history: when its serialized length reaches
            ``max_length`` characters, generate an LLM summary.

            Returns:
                (messages, is_summary): the original list with is_summary
                False, or a single-SystemMessage summary with True.
        """
        his_len = len(get_buffer_string(chat_history))
        server_logger.info(trace_id =self.trace_id, msg=f"his_len={his_len},max_length={self.max_length}")
        if his_len < self.max_length:
            #server_logger.info(trace_id =self.trace_id, msg="get_buffer_string(chat_history) < max_length")
            return chat_history , False

        summary_prompt = PromptTemplate(
            input_variables=["history"],
            template=self.system_prompt
        )
        # Runnable chain: prompt | llm (equivalent to the old LLMChain).
        chain = summary_prompt | self.llm
        # Serialize the history to a string.
        history_str = get_buffer_string(chat_history)
        # Async invocation (new API).
        summary_response = await chain.ainvoke({"history": history_str})
        # NOTE(review): this log f-string has no placeholder — it prints the
        # literal ",summary_text"; it likely meant to include the summary.
        server_logger.info(trace_id =self.trace_id, msg=f"session_id={self.session_id},summary_text")
        # Return a single "system message" carrying the summary.
        system_message = [SystemMessage(content=f"对话摘要:{summary_response.content}")]
        return system_message , True




    def _simplify_messages_content(self , history_messages):
        """
            Simplify message contents by stripping Markdown constructs.
            The only visible call site (in get_memory_last_history_str) is
            commented out, so this is currently unused.
        """
        cleaned_messages = []
        # Simplify content while iterating.
        for message in history_messages:
            if not hasattr(message, 'content') or not isinstance(message.content, str):
                # Pass through messages without string content untouched.
                cleaned_messages.append(message)
                continue
                
            content = message.content
            content = self._simplify_markdown_content(content)
            # Rebuild the message preserving type and metadata; only content changes.
            if isinstance(message, HumanMessage):
                new_msg = HumanMessage(content=content, additional_kwargs=message.additional_kwargs, response_metadata=message.response_metadata)
            elif isinstance(message, AIMessage):
                new_msg = AIMessage(content=content, additional_kwargs=message.additional_kwargs, response_metadata=message.response_metadata)
            elif isinstance(message, SystemMessage):
                new_msg = SystemMessage(content=content, additional_kwargs=message.additional_kwargs, response_metadata=message.response_metadata)
            elif isinstance(message, ToolMessage):
                # ToolMessage content is usually not user-generated text; copied with same fields.
                new_msg = ToolMessage(content=content, tool_call_id=message.tool_call_id, name=message.name, additional_kwargs=message.additional_kwargs, response_metadata=message.response_metadata)
            elif isinstance(message, FunctionMessage):
                new_msg = FunctionMessage(content=content, name=message.name, additional_kwargs=message.additional_kwargs, response_metadata=message.response_metadata)
            else:
                # Unknown types: generic rebuild via the message's own class.
                new_msg = message.__class__(content=content, **{k: v for k, v in message.__dict__.items() if k != 'content'})
            
            cleaned_messages.append(new_msg)
        
        return cleaned_messages
    

    def _simplify_markdown_content(self , content):
        """Strip Markdown structure down to plain-ish text."""
        # Remove tables.
        content = re.sub(r'\|.*\|.*\n\|.*\|.*(\n\|.*\|.*)*', '[表格数据]', content)
        # Remove headings.
        content = re.sub(r'#{1,6}\s*', '', content)
        # Remove bold/italic markers.
        content = re.sub(r'[*_]{1,2}(.*?)[*_]{1,2}', r'\1', content)
        # Remove emoji and special markers.
        content = re.sub(r'[✅❌📋📊]', '', content)
        # Collapse runs of 3+ newlines to a blank line.
        content = re.sub(r'\n{3,}', '\n\n', content)
        return content.strip()
+
+
+

+ 71 - 0
base/async_redis_lock.py

@@ -0,0 +1,71 @@
+import asyncio
+import time
+import uuid
+from typing import Optional
+from logger.loggering import server_logger
+
class AsyncRedisLock:
    """Asyncio Redis distributed lock (SET NX EX + Lua-based safe release)."""

    def __init__(self, redis_client, lock_name: str, expire_time: int = 30):
        """
        :param redis_client: async Redis client connection
        :param lock_name: name (key) of the lock
        :param expire_time: lock expiry in seconds (prevents deadlock when
            the holder dies without releasing)
        """
        self.redis = redis_client
        self.lock_name = lock_name
        self.expire_time = expire_time
        # Unique token so only the owner can release the lock.
        self.identifier = str(uuid.uuid4())

    async def acquire(self, timeout: float = 10) -> bool:
        """
        Acquire the lock, polling until ``timeout`` seconds have elapsed.

        :param timeout: how long to keep retrying (seconds)
        :return: True if the lock was acquired, False on timeout
        """
        deadline = time.time() + timeout
        while time.time() < deadline:
            # SET NX EX is atomic: only succeeds when the key does not exist.
            if await self.redis.set(
                self.lock_name,
                self.identifier,
                nx=True,
                ex=self.expire_time
            ):
                return True
            await asyncio.sleep(0.001)  # brief pause before retrying
        return False

    async def release(self) -> bool:
        """
        Release the lock if (and only if) this instance still owns it.

        :return: True if the lock was released, False otherwise
        """
        # Lua script makes check-and-delete atomic, so we never delete a lock
        # that has expired and been re-acquired by another owner.
        unlock_script = """
        if redis.call("get", KEYS[1]) == ARGV[1] then
            return redis.call("del", KEYS[1])
        else
            return 0
        end
        """
        try:
            # redis-py asyncio signature: eval(script, numkeys, *keys_and_args).
            result = await self.redis.eval(
                unlock_script,
                1,
                self.lock_name,
                self.identifier
            )
            return bool(result)
        except Exception as e:
            # Bug fix: report via the project logger instead of print() so
            # release failures show up in the service logs.
            server_logger.error(f"Error releasing lock: {e}")
            return False

    async def __aenter__(self):
        if not await self.acquire():
            raise Exception("Could not acquire lock")
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.release()

+ 35 - 0
base/config.py

@@ -0,0 +1,35 @@
+# !/usr/bin/python
+# -*- encoding: utf-8 -*-
+"""
+@Time    :   2025/07/10 14:40
+@Author  :   
+@File    :   config.py
+@Software:   VScode
+@Desc    :   None
+"""
+from configparser import ConfigParser
+
+
class ConfigHandler:
    """Thin wrapper around ConfigParser with defaults and inline-comment stripping."""

    def __init__(self, config_file=""):
        self.config = ConfigParser()
        # ConfigParser.read silently ignores missing files, so a bad path
        # simply yields an empty config (every get() falls back to `default`).
        self.config.read(config_file, encoding='utf-8')

    def get(self, section, option, default=None):
        """Return ``section``/``option``, or ``default`` when missing.

        For the special "before" section the option is prefixed with
        online_/inline_ depending on [general] is_online. Anything after a
        '#' in the value is treated as an inline comment and stripped.
        """
        try:
            if section == "before":
                # Bug fix: the original used bool() on the raw string, which
                # is True for ANY non-empty value (including "False"/"0");
                # getboolean parses true/false/yes/no/1/0 correctly.
                is_online = self.config.getboolean("general", "is_online", fallback=False)
                option = f"online_{option}" if is_online else f"inline_{option}"
                value = self.config.get(section, option)
            else:
                value = self.config.get(section, option)
            if "#" in value:
                value = value.split('#')[0].strip()
        except Exception:
            # Missing section/option (or malformed value): fall back to default.
            value = default
        return value

    def getboolean(self, section, option):
        """Proxy to ConfigParser.getboolean (raises when missing/unparseable)."""
        return self.config.getboolean(section, option)


# Module-level singleton loaded from the project config file.
config_handler = ConfigHandler("./config/config.ini")

+ 39 - 0
base/redis_config.py

@@ -0,0 +1,39 @@
+# !/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :redis_config.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/21 13:44
+'''
+
+from dataclasses import dataclass
+from base.config import config_handler
+
+
+@dataclass
+class RedisConfig:
+    """Redis 连接配置"""
+    url: str = "redis://127.0.0.1:6379"
+    host: str = "127.0.0.1"
+    port: int = 6379
+    password: str = None
+    db: int = 0
+    max_connections: int = 50
+    session_prefix: str = "session:"
+    lock_prefix: str = "lock:"
+    session_ttl: int = 3600  # 会话过期时间(秒)
+
+
+
def load_config_from_env() -> RedisConfig:
    """Build a RedisConfig from the [redis] section of config.ini.

    Bug fix: the return annotation claimed ``tuple[RedisConfig]`` while a
    single RedisConfig instance is returned.
    """
    return RedisConfig(
        url=config_handler.get("redis", "REDIS_URL", "redis://127.0.0.1:6379"),
        password=config_handler.get("redis", "REDIS_PASSWORD"),
        db=int(config_handler.get("redis", "REDIS_DB", "0")),
        max_connections=int(config_handler.get("redis", "REDIS_MAX_CONNECTIONS", "50"))
    )
+

+ 212 - 0
base/redis_connection.py

@@ -0,0 +1,212 @@
+# !/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :redis_connection.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/21 15:07
+'''
+import redis                     # 同步专用
+from redis import asyncio as aioredis
+
+
+from typing import Optional, Protocol, Dict, Any
+from base.redis_config import RedisConfig
+from base.redis_config import load_config_from_env
+from logger.loggering import server_logger
+from typing import Dict, Any, List
+from typing import Tuple, Optional
+from langchain_community.storage import RedisStore
+
class RedisConnection(Protocol):
    """
    Structural protocol for the async Redis operations the app relies on.

    Implemented by RedisAdapter; lets callers depend on an interface rather
    than a concrete client.
    """
    async def get(self, key: str) -> Any: ...

    async def set(self, key: str, value: Any, ex: Optional[int] = None, nx: bool = False) -> bool: ...

    async def hget(self, key: str, field: str) -> Any: ...

    async def hset(self, key: str, field: str, value: Any) -> int: ...

    async def hmset(self, key: str, mapping: Dict[str, Any]) -> bool: ...

    async def hgetall(self, key: str) -> Dict[str, Any]: ...

    async def delete(self, *keys: str) -> int: ...

    async def exists(self, key: str) -> int: ...

    async def expire(self, key: str, seconds: int) -> bool: ...

    async def scan(self, cursor: int, match: Optional[str] = None, count: Optional[int] = None) -> tuple[
        int, list[str]]: ...

    # Bug fix: signature now matches RedisAdapter.eval and the callers
    # (script, numkeys, *keys_and_args) instead of (script, keys, args).
    async def eval(self, script: str, numkeys: int, *keys_and_args: str) -> Any: ...

    async def close(self) -> None: ...
+
+
+
+
+
class RedisAdapter(RedisConnection):
    """
    Adapter implementing RedisConnection on top of redis-py's asyncio client.

    Holds two clients against the same server: one with
    decode_responses=True for ordinary operations, and one returning raw
    bytes for LangChain RedisStore (which requires bytes values).
    """
    def __init__(self, config: RedisConfig):
        self.config = config
        # Client for ordinary Redis operations (str responses).
        self._redis = None
        # Client for LangChain RedisStore usage (bytes responses).
        self._langchain_redis_client = None

    async def connect(self):
        """Create both Redis clients and return self.

        Bug fix: ``redis.asyncio.from_url`` is a synchronous factory (the
        connection is established lazily on first command), so it must not
        be awaited — the original ``await aioredis.from_url(...)`` raised
        TypeError.
        """
        self._redis = aioredis.from_url(
            self.config.url,
            password=self.config.password,
            db=self.config.db,
            encoding="utf-8",
            decode_responses=True,
            max_connections=self.config.max_connections
        )
        # decode_responses must stay False: LangChain RedisStore needs bytes.
        self._langchain_redis_client = aioredis.from_url(
            self.config.url,
            password=self.config.password,
            db=self.config.db,
            encoding="utf-8",
            decode_responses=False,
            max_connections=self.config.max_connections
        )
        return self

    async def get(self, key: str) -> Any:
        return await self._redis.get(key)

    async def set(self, key: str, value: Any, ex: Optional[int] = None, nx: bool = False) -> bool:
        return await self._redis.set(key, value, ex=ex, nx=nx)

    async def hget(self, key: str, field: str) -> Any:
        return await self._redis.hget(key, field)

    async def hset(self, key: str, field: str, value: Any) -> int:
        return await self._redis.hset(key, field, value)

    async def hmset(self, key: str, mapping: Dict[str, Any]) -> bool:
        return await self._redis.hmset(key, mapping)

    async def hgetall(self, key: str) -> Dict[str, Any]:
        return await self._redis.hgetall(key)

    async def delete(self, *keys: str) -> int:
        return await self._redis.delete(*keys)

    async def exists(self, key: str) -> int:
        return await self._redis.exists(key)

    async def expire(self, key: str, seconds: int) -> bool:
        return await self._redis.expire(key, seconds)

    async def scan(self, cursor: int, match: Optional[str] = None, count: Optional[int] = None) -> tuple[
        int, list[str]]:
        return await self._redis.scan(cursor, match=match, count=count)

    async def eval(self, script: str, numkeys: int, *keys_and_args: str) -> Any:
        # Unpack into positional args per redis-py's eval signature.
        return await self._redis.eval(script, numkeys, *keys_and_args)

    def get_langchain_redis_client(self):
        """Return the bytes-mode client used for LangChain RedisStore."""
        return self._langchain_redis_client

    async def close(self) -> None:
        """Close both clients.

        Bug fix: redis-py's asyncio client has no wait_closed() (that was
        aioredis 1.x); calling it raised AttributeError. close() suffices.
        """
        if self._redis:
            await self._redis.close()
        if self._langchain_redis_client:
            await self._langchain_redis_client.close()
+            await self._langchain_redis_client.wait_closed()
+
+
+
+
class RedisConnectionFactory:
    """
    Factory/singleton registry for Redis connections and LangChain clients.

    Connections are cached per (url, db) so repeated calls share one adapter.
    NOTE(review): the cache is not guarded by a lock; concurrent first calls
    may briefly create duplicate adapters (last one wins) — confirm this is
    acceptable for the startup path.
    """
    _connections: Dict[str, RedisConnection] = {}
    _stores: Dict[str, RedisStore] = {}

    @classmethod
    async def get_connection(cls) -> RedisConnection:
        """Return the cached adapter for the configured Redis, creating it on first use."""
        redis_config = load_config_from_env()
        # Unique cache key per target server/db.
        conn_id = f"{redis_config.url}-{redis_config.db}"
        if conn_id not in cls._connections:
            adapter = RedisAdapter(redis_config)
            await adapter.connect()
            cls._connections[conn_id] = adapter
        return cls._connections[conn_id]

    @classmethod
    async def get_redis_store(cls):
        """Return the raw bytes-mode client for LangChain use.

        Bug fix: the original annotated the return as RedisStore although the
        underlying client is returned, and loaded config it never used.
        """
        conn = await cls.get_connection()
        return conn.get_langchain_redis_client()

    @classmethod
    async def get_langchain_redis_store(cls) -> RedisStore:
        """Cached variant of get_redis_store.

        Known to be problematic (returns the client, not a RedisStore); kept
        for backward compatibility with existing callers.
        """
        redis_config = load_config_from_env()
        # Unique cache key per target server/db.
        store_id = f"{redis_config.url}-{redis_config.db}"
        if store_id not in cls._stores:
            conn = await cls.get_connection()
            client = conn.get_langchain_redis_client()
            server_logger.info(f"client={client}")
            cls._stores[store_id] = client
        return cls._stores[store_id]

    @classmethod
    async def close_all(cls):
        """Close and forget every cached connection."""
        for conn in cls._connections.values():
            await conn.close()
        cls._connections = {}

    @classmethod
    def get_connection_count(cls) -> int:
        """Number of live cached connections."""
        return len(cls._connections)
+
+

+ 67 - 0
base/redis_lock.py

@@ -0,0 +1,67 @@
+# !/usr/bin/python
+# -*- encoding: utf-8 -*-
+"""
+@Time    :   2025/07/30 14:40
+@Author  :    
+@File    :   RedisLock.py
+@Software:   VScode
+@Desc    :   None
+"""
+
+
+import time
+import uuid
+
class RedisLock:
    """
    Synchronous Redis distributed lock (SET NX EX + Lua-based safe release).

    Mirrors AsyncRedisLock; usable as a context manager:
        with RedisLock(client, "my-lock"):
            ...
    """

    def __init__(self, redis_client, lock_name, expire_time=30):
        """
        :param redis_client: Redis client connection
        :param lock_name: name (key) of the lock
        :param expire_time: lock expiry in seconds
        """
        self.redis = redis_client
        self.lock_name = lock_name
        self.expire_time = expire_time
        # Unique token so only the owner can release the lock.
        self.identifier = str(uuid.uuid4())

    def acquire(self, timeout=10):
        """
        Acquire the lock, polling until ``timeout`` seconds have elapsed.

        :param timeout: how long to keep retrying (seconds)
        :return: True if the lock was acquired, False on timeout
        """
        deadline = time.time() + timeout
        while time.time() < deadline:
            # SET NX EX is atomic: only succeeds when the key does not exist.
            if self.redis.set(self.lock_name, self.identifier, nx=True, ex=self.expire_time):
                return True
            time.sleep(0.001)  # brief pause before retrying
        return False

    def release(self):
        """
        Release the lock if (and only if) this instance still owns it.

        Bug fix: the original discarded the script result; now returns
        True/False like AsyncRedisLock.release for consistency.
        """
        # Lua script makes check-and-delete atomic, so we never delete a lock
        # that has expired and been re-acquired by another owner.
        unlock_script = """
        if redis.call("get", KEYS[1]) == ARGV[1] then
            return redis.call("del", KEYS[1])
        else
            return 0
        end
        """
        result = self.redis.eval(unlock_script, 1, self.lock_name, self.identifier)
        return bool(result)

    def __enter__(self):
        if not self.acquire():
            raise Exception("Could not acquire lock")
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()

TEMPAT SAMPAH
config/.DS_Store


+ 57 - 0
config/config.ini

@@ -0,0 +1,57 @@
+
+
+[model]
+MODEL_TYPE=qwen
+
+[deepseek]
+DEEPSEEK_SERVER_URL=https://api.deepseek.com
+DEEPSEEK_MODEL_ID=deepseek-chat
+DEEPSEEK_API_KEY=
+
+[qwen]
+MODEL_SERVER_URL=https://api-inference.modelscope.cn/v1/
+CHAT_MODEL_ID=Qwen/Qwen3-30B-A3B
+API_KEY=ms-61bf873e-7536-42a9-b830-b12dca656e1f
+
+
+[api_key]
+DASHSCOPE_API_KEY=sk-9fca4fca37ce4f509ec9ead71ccdd542
+EMBED_MODEL_ID=text-embedding-v4
+
+[mcp]
+MCP_SERVER_CONFIG_PATH=mulian_servers_config.json
+
+
+[app]
+APP_CODE=lq-agent
+APP_SECRET=sx-73d32556-605e-11f0-9dd8-acde48001122
+
+[lru]
+AGENT_MAX_HISTORY_TOKENS=50000
+AGENT_MAX_HISTORY_MESSAGES=20
+AGENT_RECOGNIZE_INTENT_MAX_HISTORY_MESSAGES=6
+USE_RECOGNIZE_INTENT_HISTORY_MESSAGES=True
+
+[redis]
+REDIS_URL=redis://:123456@127.0.0.1:6379
+REDIS_HOST=127.0.0.1
+REDIS_PORT=6379
+REDIS_DB=0
+REDIS_PASSWORD=123456
+REDIS_MAX_CONNECTIONS=50
+
+[log]
+LOG_FILE_PATH=logs
+LOG_FILE_MAX_MB=10
+LOG_BACKUP_COUNT=5
+CONSOLE_OUTPUT=True
+
+[chat]
+CHAT_SESSION_LOCK=False
+
+
+[knowledge_dify]
+DIFY_SERVER_URL=http://dify.xiwudev.cn/v1
+DIFY_API_KEY=dataset-O4i1jbk1DTsUZz5KqndQxo6C
+DIFY_DATASET_ID_LIST=e4d3965e-f66a-45db-8010-fe7acdd46f5d,745e2f10-e7a6-4d54-83ef-13ff09cc700c
+DIFY_DATASET_URL=/datasets/{dataset_id}/retrieve

+ 8 - 0
config/mulian_servers_config.json

@@ -0,0 +1,8 @@
+ {
+    "@xiwuzc/@mulian/farm-info": {
+        "name": "业务工具列表",
+        "description": "一个支持任何MCP协议客户端的服务器。",
+        "type": "streamable-http",
+        "url": "http://localhost:3001/mcp/"
+    }
+}

+ 10 - 0
config/prompt/common_model_query.yaml

@@ -0,0 +1,10 @@
+
+# 任务提示词
+task_prompt: |
+  你是一个智能助手,根据提供的信息回答问题。
+
+
+
+# test
+template: |
+  ## 测试内容

+ 9 - 0
config/prompt/fixed_intent_prompt.yaml

@@ -0,0 +1,9 @@
+
+
+
+# 系统提示词 - 固定问题
+system_prompt: |
+  学习给定样例,根据问题匹配对应业务场景指令,如果无法匹配,请返回“other”。
+  严格遵守:必须仅返回指令内容,不包含格式说明或额外解释。
+
+

+ 20 - 0
config/prompt/intent_prompt.yaml

@@ -0,0 +1,20 @@
+
+# 系统提示词
+system_prompt: |
+  基于提供的样例,结合用户最近的对话历史上下文进行意图识别,精准匹配对应的业务场景指令。
+  必须优先参考最近的上下文语义及用户意图演变,若问题与样例中的任一业务场景相符,则返回对应指令;若无法匹配任何已定义场景,则返回 cattle_farm_common。
+  严格遵守:仅输出指令字符串,不附加任何解释、说明或格式。
+  用户目前历史上下文信息:
+  {history}
+
+
+
+
+# 意图案例 准备few-shot样例 联系数字专家进行诊断;
+intent_examples: 
+  - inn: 信息查询.
+    out: cattle_farm_common
+
+ 
+
+           

+ 19 - 0
config/prompt/system_prompt.yaml

@@ -0,0 +1,19 @@
+
+
+# 系统提示词
+system_prompt: |
+  分析专家于一身的AI助手,提供全方位的智能化指导。
+        你的建议要务实、经济、易操作,并能基于物联网数据提供精准预警和具体解决方案。
+            
+
+    
+# 用户上下文会话记录 摘要提示词
+summary_system_prompt: |
+  请总结以下对话内容,保留关键信息:
+  {history}
+
+
+
+# test
+template: |
+  ## 测试内容

+ 22 - 0
config/servers_config.json

@@ -0,0 +1,22 @@
+ {
+    "filesystem": {
+        "command": "npx",
+        "args": [
+            "-y",
+            "@modelcontextprotocol/server-filesystem",
+            "."  
+        ]
+    },
+    "@modelscope/@modelcontextprotocol/fetch": {
+        "name": "Fetch网页内容抓取",
+        "description": "该服务器使大型语言模型能够检索和处理网页内容,将HTML转换为markdown格式,以便于更轻松地使用。",
+        "type": "sse",
+        "url": "https://mcp.api-inference.modelscope.cn/sse/07630cdeaa1548"
+    },
+    "@modelscope/@amap/amap-maps": {
+        "name": "高德地图",
+        "description": "高德地图是一个支持任何MCP协议客户端的服务器,允许用户轻松利用高德地图MCP服务器获取各种基于位置的服务。",
+        "type": "sse",
+        "url": "https://mcp.api-inference.modelscope.cn/sse/1de8cdc801c546"
+    }
+}

+ 139 - 0
core/__init__.py

@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :__init__.py.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/10 16:30
+'''
+
+from langgraph.prebuilt import create_react_agent
+from langgraph.checkpoint.memory import MemorySaver
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.messages import HumanMessage
+
+from logger.loggering import server_logger
+from utils.utils import get_models
+from function.function_call import FunctionCall
+
+from io import StringIO
+import sys
+
+
+class XiwuzcAgent:
+    """
+        Xiwuzc 智能助手+function call
+    """
+
+    def __init__(self):
+        # 初始化
+        self.init_agent()
+
+    # 初始化 agent 对象
+    def init_agent(self):
+        # 获取部署的模型列表
+        llm, chat, embed = get_models()
+        self.llm = llm
+        self.chat = chat
+        # 初始化 工具列表
+        function_call = FunctionCall()
+        tools = [
+            function_call.query_recently_cattle_farm_ambient_info,
+            function_call.query_recently_cattle_temperature,
+            function_call.query_recently_cattle_eat_water,
+        ]
+        # 创建系统Prompt提示语
+        system_prompt = self.create_sys_prompt()
+
+        prompt = ChatPromptTemplate.from_messages([
+            ("system", system_prompt),
+            ("placeholder", "{messages}"),
+            ("placeholder", "{agent_scratchpad}")
+        ])
+
+        # 创建Agent
+        self.agent_executor = create_react_agent(
+            self.llm,
+            tools=tools,  #
+            prompt=prompt,
+            checkpointer=MemorySaver()
+        )
+
+    def handle_query(self, input_query, session_id):
+        # 流式处理事件
+        config = {"configurable": {"thread_id": session_id}}
+        try:
+            events = self.agent_executor.stream(
+                {"messages": [HumanMessage(content=input_query)]},
+                config=config,
+                stream_mode="values",
+            )
+            result_list = []
+            # 打印流式事件的消息
+            for event in events:
+                message = event["messages"][-1]  # 取最后一步信息
+                result_list.append(message.content)
+                # 转换为字符串并写入日志文件
+                log_content = self.get_pretty_message_str(message)
+                server_logger.info("\n" + log_content.strip())
+
+            final_result = event["messages"][-1].content if result_list else None
+            server_logger.info("=" * 50)
+            server_logger.info(f"最终结果: \n {final_result}")
+            server_logger.info("=" * 50)
+
+            return final_result
+        except Exception as e:
+            server_logger.error(f"处理查询时出错: {e}")
+            raise e
+
+    # agent 非流式输出
+    def handle_invoke_query(self, input_query, session_id):
+        config = {"configurable": {"thread_id": session_id}}
+        try:
+            result = self.agent_executor.invoke(
+                {"messages": [HumanMessage(content=input_query)]},
+                config=config,
+                stream_mode="values",
+            )
+            server_logger.info(f"result={result}")
+            for presult in result["messages"]:
+                server_logger.info(f'【agent】: {presult}')
+            server_logger.info("=" * 50)
+            final_result_conent = result["messages"][-1].content
+            server_logger.info(f"final_result_conent={final_result_conent}")
+            return final_result_conent
+        except Exception as e:
+            server_logger.error(f"处理查询时出错: {e}")
+            raise e
+
+    def get_pretty_message_str(self, message):
+        """
+            捕获 pretty_print() 输出为字符串
+        """
+        captured_output = StringIO()
+        sys.stdout = captured_output
+        server_logger.info(message.pretty_print())
+        sys.stdout = sys.__stdout__
+        return captured_output.getvalue()
+
+    @staticmethod
+    def create_sys_prompt():
+        system_prompt = """
+            你是一个农业智能专家,请根据提供的数据信息和规则信息分析是否存在异常并进行建议。
+            请严格按照以下步骤操作:
+            1. 检查可用工具
+            2. 必要时调用工具获取数据
+            3. 结合数据进行分析
+            4. 给出专业建议
+
+            注意:
+            - 必须通过工具获取最新数据后再分析
+            - 保持回答专业且简洁
+            """
+        return system_prompt
+
+
+
+

+ 155 - 0
enums/common_enums.py

@@ -0,0 +1,155 @@
+
+from enum import Enum
+
+
+class ErrorCodeEnum(Enum):
+    """
+        错误码枚举定义
+    """
+    SUCCESS = ('100000', '成功')
+    ERROR = ('100500', '服务异常')
+
+    
+    SESSION_ID_EMPTY = ('100001', '会话ID为空')
+    BUSINSESS_SCENE_ERROR = ('100002', '业务场景错误')
+    INPUT_INFO_EMPTY = ('100003', '用户输入为空')
+    BUSINSESS_SCENE_EMPTY = ('100004', '业务场景为空')
+    BUSINSESS_SCENE_PROMPT_FILE_EMPTY = ('100005', '业务场景提示词文件为空')
+    BUSINSESS_SCENE_PROMPT_FILE_NOT_EXISTS = ('100006', '业务场景提示词文件不存在')
+    BUSINSESS_SCENE_PROMPT_FILE_READ_ERROR = ('100007', '业务场景提示词文件读取异常')
+    
+    def __init__(self, code : str, desc : str):
+        self.code = code
+        self.desc = desc
+
+    
+    def get_item_by_code(self , code : str):
+        """
+            根据code 找枚举项
+        """
+        for item in list(ErrorCodeEnum):
+            if item.code == code:
+                return item
+        return None
+
+    
+
+    def __str__(self) -> str:
+        return self.code + ":"  + self.desc
+    
+
+
+
+class BusinessSceneEnum(Enum):
+    """
+        业务场景枚举定义
+    """
+    COMMON_MODEL_QUERY = ('common_model_query', '通用模型查询场景' , 'common_model_query.yaml')
+    CATTLE_FARM_COMMMON = ('cattle_farm_common', '' , 'cattle_farm_common.yaml')
+    CATTLE_FARM_QUERY = ('cattle_farm_query', '' , 'cattle_farm_query.yaml', "json")
+    CATTLE_FARM_QUERY_FIXED = ('cattle_farm_query_fixed', '-固定' , 'cattle_farm_query_fixed.yaml', "json")
+    CATTLE_FARM_IOT = ('cattle_farm_iot', '' , 'cattle_farm_iot.yaml')
+
+
+
+    
+    CATTLE_FARM_WARNING_PLAN = ('cattle_farm_warning_plan', '' , 'cattle_farm_warning_plan.yaml', "json")
+    CATTLE_FARM_WARNING_TASK_EXECUTE = ('cattle_farm_warning_task_execute', '-任务执行' , 'cattle_farm_warning_task_execute.yaml')
+
+    def __init__(self, code, desc , prompt_file , data_type="text"):
+        self.code = code
+        self.desc = desc
+        self.prompt_file = prompt_file
+        self.data_type = data_type
+
+    @staticmethod  
+    def get_item_by_code(code):
+        """
+            根据code 找枚举项
+        """
+        for item in list(BusinessSceneEnum):
+            if item.code == code:
+                return item
+        return None
+
+    @staticmethod  
+    def get_item_by_code_def_val(code , def_val):
+        """
+            根据code 找枚举项,如果没有找到,则返回默认值
+        """
+        for item in list(BusinessSceneEnum):
+            if item.code == code:
+                return item
+        return def_val
+
+# print(BusinessSceneEnum.CATTLE_FARM_QUERY_PLAN.code , BusinessSceneEnum.CATTLE_FARM_QUERY_PLAN.desc)     # 
+# print(BusinessSceneEnum.CATTLE_FARM_WARNING_PLAN.code , BusinessSceneEnum.CATTLE_FARM_WARNING_PLAN.desc)    # 
+
+
+
+class CacheDataKeyTypeEnum(Enum):
+    """
+        缓存数据data key类型枚举
+            基本信息 cattle_info
+            体温信息 cattle_temperature 
+            步数信息 cattle_walk
+    """
+    
+    CATTLE_INFO = ('cattle_info', '基本信息')
+    CATTLE_TEMPERATURE = ('cattle_temperature', '体温信息')
+    CATTLE_WALK = ('cattle_walk', '步数信息')
+    RETRIEVER_RESOURCES = ('retriever_resources', '私有知识库检索溯源')
+
+    
+    
+    def __init__(self, code : str, desc : str):
+        self.code = code
+        self.desc = desc
+
+    
+    def get_item_by_code(self , code : str):
+        """
+            根据code 找枚举项
+        """
+        for item in list(CacheDataKeyTypeEnum):
+            if item.code == code:
+                return item
+        return None
+
+    
+
+    def __str__(self) -> str:
+        return self.code + ":"  + self.desc
+
+    
+
+
+class UserRoleEnum(Enum):
+    """
+        用户角色定义枚举
+            普通用户 common
+            租户用户 tenant 
+    """
+    
+    COMMON = ('common', '普通用户')
+    TENANT = ('tenant', '租户用户')
+   
+    
+    def __init__(self, code : str, desc : str):
+        self.code = code
+        self.desc = desc
+
+    
+    def get_item_by_code(self , code : str):
+        """
+            根据code 找枚举项
+        """
+        for item in list(UserRoleEnum):
+            if item.code == code:
+                return item
+        return None
+
+    
+
+    def __str__(self) -> str:
+        return self.code + ":"  + self.desc

+ 34 - 0
function/knowledge_dify.py

@@ -0,0 +1,34 @@
+
+
+
+
+from base.config import config_handler
+from logger.loggering import server_logger
+from utils.common import handler_err
+from utils.request_tool import KnowledgeDifyAction
+
+
+class KnowledgeDify:
+    """
+        私有知识库查询处理(dify)
+    """
+
+
+    def __init__(self):
+        self.knowledge_dify_action = KnowledgeDifyAction()
+
+
+    def get_request_knowledge_dify(self , trace_id: str, question: str):
+        """
+            获取知识库内容
+            :param question: 问题
+            :return:
+        """
+        record_list = self.knowledge_dify_action.get_request_knowledge_retrieve_list(
+            params={"query": question} , trace_id=trace_id
+        )
+        return record_list
+
+
+        
+    

+ 91 - 0
function/load_mcp_server.py

@@ -0,0 +1,91 @@
+import json
+import logging
+import os
+from dotenv import load_dotenv
+from typing import Any, Dict
+from langchain_mcp_tools import convert_mcp_to_langchain_tools
+
+from base.config import config_handler
+from logger.loggering import server_logger
+
+from utils.common import handler_err
+
+
+class LoadMcpServer:
+
+    def __init__(self):
+        self.tools = []   # 默认为空列表
+        self.cleanup = None
+        self.mcp_server_config_path = config_handler.get("mcp","MCP_SERVER_CONFIG_PATH")
+        # 加载MCP配置文件信息
+        self.mcp_configs = self.load_mcp_configs(self.mcp_server_config_path)
+    
+
+    def load_mcp_configs(self, conf_file_path: str) -> Dict[str, Any]:
+        """
+            从 JSON 文件加载服务器配置
+            Args:
+                conf_file_path: JSON 配置文件路径
+            Returns:
+                包含服务器配置的字典
+        """
+          # 获取当前文件的目录
+        current_dir = os.path.dirname(__file__)
+        # 构建到 .env 的相对路径
+        conf_file_path = os.path.join(current_dir , '../',  'config' , conf_file_path)
+        server_logger.info(f"加载服务器配置: {conf_file_path}")
+        if not os.path.exists(conf_file_path):
+            server_logger.error(f"配置文件不存在: {conf_file_path}")
+            raise ValueError(f"'conf_file_path':{conf_file_path} 不存在!!")
+        try:
+            with open(conf_file_path, "r") as f:
+                return json.load(f)
+
+
+        except Exception as e:
+            server_logger.error(f"❌ 加载 MCP 配置文件失败: {e}")
+
+
+    async def get_mcp_tools(self):
+        """
+            加载MCP-Server对应的工具列表
+        """
+        try:
+            if not self.tools:
+                # ...原有工具加载逻辑...
+                tools, cleanup = await convert_mcp_to_langchain_tools(
+                    self.mcp_configs,
+                    server_logger
+                )
+                self.tools = tools
+                self.cleanup = cleanup
+            # 打印工具列表
+            self.server_logger_tools_info()
+            server_logger.info(f"tools init success")
+        except Exception as e:
+            handler_err(server_logger, e, err_name="工具加载失败")
+        return self.tools
+    
+
+    def server_logger_tools_info(self):
+        """
+            打印 MCP 模型工具列表
+        """
+        server_logger.info(f"MCP 模型工具总共[{len(self.tools)}]个")
+        server_logger.info(f"MCP 模型工具列表分别如下:")
+        for tool in self.tools:
+            server_logger.info("=" * 50)
+            server_logger.info(f"工具名称: {tool.name}")
+            server_logger.info(f"工具描述: {tool.description}")
+            server_logger.info(f"工具参数: {tool.args}")
+
+    async def close(self):
+        """
+            关闭MCP服务端连接资源
+        """
+        if self.cleanup is not None:
+            try:
+                await self.cleanup()
+                server_logger.info("✅ MCP 资源已成功清理")
+            except Exception as e:
+                server_logger.error(f"⚠️ 清理 MCP 资源失败: {e}")

+ 50 - 0
function/local_function.py

@@ -0,0 +1,50 @@
+
+
+from datetime import datetime
+from function.knowledge_dify import KnowledgeDify
+from logger.loggering import server_logger 
+from utils.redis_utils import set_redis_result_cache_data
+from utils.tool_utils import verify_user_role
+from enums.common_enums import CacheDataKeyTypeEnum
+import json
+# 函数定义 要语义化,不能汉语拼音
+def get_current_datetime() -> str:
+    """
+        获取当前的日期和时间
+    """
+    now = datetime.now()
+    formatted_data = now.strftime("%Y-%m-%d %H:%M:%S")
+    return formatted_data
+
+
+
+
+knowledge_dify = KnowledgeDify()
+
+
+async def get_knowledge_answer(question: str , trace_id: str , user_role: str) -> str:
+    """
+        相关知识问题检索的工具
+        知识分类包括:政策法规、市场分析、基础、管理技术、案例库、高频问题、综合类
+        参数:
+            question: 用户问题中(用户输入问题)
+            trace_id: 日志链路跟踪ID
+            user_role: 用户角色
+    """
+    # 如果是普通用户直接返回无,只有租户用户才能检索查询知识库
+    if not verify_user_role(user_role):
+        return "无"
+    
+    record_list = knowledge_dify.get_request_knowledge_dify(trace_id=trace_id, question=question)
+    if record_list is None  or len(record_list) == 0:
+        return "无"
+    # 设置缓存数据,用于智能查询
+    retriever_resources_list = [record["segment"]["document"] for record in record_list]
+    server_logger.info(trace_id=trace_id, msg=f"知识库检索结果列表: retriever_resources_list={retriever_resources_list}")
+    await set_redis_result_cache_data(CacheDataKeyTypeEnum.RETRIEVER_RESOURCES.code, trace_id, json.dumps(retriever_resources_list))
+
+    content_list = [record["segment"]["content"] for record in record_list]
+    #server_logger.info(trace_id=trace_id, msg=f"知识库检索结果列表: content_list={content_list}")
+    return "\n".join(content_list)
+
+

+ 9 - 0
generate/__init__.py

@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :__init__.py.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/14 14:22
+'''

+ 178 - 0
generate/model_generate.py

@@ -0,0 +1,178 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :model_generate.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/14 14:22
+'''
+
+from typing import List, Dict, Any, Optional, AsyncGenerator
+from langchain_core.prompts import HumanMessagePromptTemplate
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
+from langgraph.prebuilt import ToolNode
+
+from utils.utils import get_models
+from views import mcp_server
+from utils.yaml_utils import system_prompt_config
+from logger.loggering import server_logger
+
+
+class XiwuzcModelGenerateClient:
+    """
+        主要是生成式模型
+    """
+
+    def __init__(self):
+        # 获取部署的模型列表
+        llm, chat, embed = get_models()
+        self.llm = llm
+        self.chat = chat
+        # 构造工具列表
+        self.tool_node_list = [] # ToolNode(mcp_server.tools)
+        # 模型绑定工具列表
+        self.llm_with_tools = None #llm.bind_tools(mcp_server.tools)
+        # 工具调用系统提示词
+        self.system_prompt = "" #system_prompt_config["tools_system_prompt"]
+
+
+    def get_prompt_template(self):
+        """
+            构造普通Prompt提示词模板
+        """
+        human_template = """
+            {system_message}
+            用户的问题为:
+                {question}  
+            答案为:
+        """
+        human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
+        chat_prompt_template = ChatPromptTemplate.from_messages([human_message_prompt])
+        return chat_prompt_template
+
+    def get_model_generate_stream(self, task_prompt_info: dict, op_id, input_query, context=None, supplement_info=None):
+        """
+            模型生成链
+        """
+        # Step 1: 定义系统提示词模板 system_prompt
+
+        # Step 2: 构建完整的 prompt 模板
+        prompt_template = ChatPromptTemplate.from_messages([
+            ("system", task_prompt_info["task_prompt"]),
+            ("human", "{input}")
+        ])
+        # Step 3: 初始化模型
+        # Step 4: 使用模板格式化输入
+        messages = prompt_template.invoke({"input": input_query})
+        # Step 5: 流式调用模型
+        response = self.llm.stream(messages)
+        # Step 6: 逐 token 输出(打字机效果)
+        for chunk in response:
+            yield chunk.content
+
+
+    async def get_model_tools_call(self, operate_id: str, session_id, task_prompt_info: dict, input_query, context=None, supplement_info=None,
+                           header_info=None):
+        """
+            工具调用
+        """
+         # 构建输入消息
+        input_message = self.get_input_context(
+                trace_id= operate_id,
+                task_prompt_info=task_prompt_info,
+                input_query=input_query,
+                context=context,
+                supplement_info=supplement_info,
+                header_info=header_info
+            )
+        # Step 1: 构建完整的 prompt 模板
+        prompt_template = ChatPromptTemplate.from_messages([
+            ("system", self.system_prompt),
+            ("human", "{input}")
+        ])
+        # Step 2: 调用带有工具的 LLM
+        # response = self.llm_with_tools.invoke(
+        #     [HumanMessage(content="北京的天气怎么样?")]
+        # )
+        messages = prompt_template.format_messages(input=input_message)
+        response = await self.llm_with_tools.ainvoke(messages)
+        #server_logger.info(f"response={response},{dir(response)}")
+        # 2. 检查是否有工具调用
+        if "tool_calls" in response.additional_kwargs:
+            # 构造符合要求的 AIMessage
+            tool_call_message = AIMessage(
+                content="",
+                additional_kwargs=response.additional_kwargs
+            )
+            server_logger.info(operate_id=operate_id, msg=f"self.tool_node_list={self.tool_node_list}")
+            # 传入格式化的消息
+            tool_response = await self.tool_node_list.ainvoke({"messages": [tool_call_message]})
+            #server_logger.info(operate_id=operate_id, msg=f"tool_response={tool_response}")
+            tools_message_result_list = []
+            for tools_message in tool_response["messages"]:
+                tools_message_result_list.append(tools_message.content)
+            result = "\n".join(tools_message_result_list)
+            server_logger.info(operate_id=operate_id, msg=f"tool_calls.tool_response.result={result}")
+            result = self.clean_json_output(result)
+            return result
+        else:
+            result = response.content
+            server_logger.info(operate_id=operate_id, msg=f"response.content={result}")
+            result = self.clean_json_output(result)
+            return result
+
+
+
+    def get_input_context(
+            self,
+            trace_id: str,
+            task_prompt_info: dict,
+            input_query: str,
+            context: Optional[str] = None,
+            supplement_info: Optional[str] = None,
+            header_info: Optional[Dict] = None
+    ) -> str:
+        #server_logger.info(f"task_prompt_info: {task_prompt_info}")
+        """构建问题和上下文"""
+        context = context or "无"
+        supplement_info = supplement_info or "无"
+        token = header_info.get('token', '') if header_info else ''
+        tenantId = header_info.get('tenantId', '') if header_info else ''
+        task_prompt_info_str = task_prompt_info["task_prompt"]
+
+        # 针对场景优化的上下文提示
+        base_context_prompt = """
+            日志链路跟踪ID:{trace_id}
+            任务信息:{task_prompt_info_str}
+            相关上下文数据:{context}
+            补充信息:{supplement_info}
+            用户问题:{input}
+            安全验证:{token}
+            租户ID:{tenantId}
+        """
+        return base_context_prompt.format(
+            trace_id=trace_id,
+            task_prompt_info_str=task_prompt_info_str,
+            context=context,
+            input=input_query,
+            supplement_info=supplement_info,
+            token=token,
+            tenantId=tenantId
+        )
+
+
+    def clean_json_output(self , raw_output: str) -> str:
+        """
+            去除开头和结尾的 ```json 和 ```
+        """
+        cleaned = raw_output.strip()
+        if cleaned.startswith("```json"):
+            cleaned = cleaned[7:]  # 去掉开头的 ```json
+        if cleaned.endswith("```"):
+            cleaned = cleaned[:-3]  # 去掉结尾的 ```
+        return cleaned.strip()
+
+#
+xwzc_generate_client = XiwuzcModelGenerateClient()

+ 35 - 0
gunicorn_config.py

@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :gunicorn_config.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/23 09:07
+'''
+
+
+import multiprocessing
+
+# 基础配置
+bind = "0.0.0.0:8001"
+workers = multiprocessing.cpu_count() + 1  # 推荐公式
+worker_class = "uvicorn.workers.UvicornWorker"
+timeout = 120
+keepalive = 5
+
+# 日志配置
+accesslog = "./gunicorn_log/access_log.log"  # 访问日志输出到文件
+errorlog = "./gunicorn_log/error_log.log"   # 错误日志输出到文件
+loglevel = "info"
+
+# 性能优化
+max_requests = 1000     # 防止内存泄漏
+max_requests_jitter = 50
+graceful_timeout = 30   # 优雅停机时间
+
+# MCP 特定优化
+preload_app = True  # 减少内存占用,加速启动
+
+# 安全增强
+limit_request_line = 4094  # 防止过大请求头

+ 145 - 0
logger/loggering.py

@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :loggering.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/11 10:48
+'''
+from base.config import config_handler
+
+
+import os
+import sys
+import logging
+from logging.handlers import RotatingFileHandler
+
+
+class CompatibleLogger(logging.Logger):
+    """
+    完全兼容的日志记录器,继承自 logging.Logger
+    提供按级别分文件的日志记录,每个文件包含指定级别及更高级别的日志
+    """
+
+    def __init__(self, name, log_dir="logs", console_output=True,
+                 file_max_mb=10, backup_count=5,
+                 log_format=None, datefmt=None):
+        # 初始化父类
+        super().__init__(name)
+        self.setLevel(logging.DEBUG)  # 设置logger自身为最低级别
+
+        # 存储配置
+        self.log_dir = log_dir
+        self.console_output = console_output
+        self.file_max_bytes = file_max_mb * 1024 * 1024
+        self.backup_count = backup_count
+
+        # 设置日志格式
+        self._set_formatter(log_format, datefmt)
+
+        # 确保日志目录存在
+        os.makedirs(log_dir, exist_ok=True)
+
+        # 清除可能存在的旧处理器
+        if self.hasHandlers():
+            self.handlers.clear()
+
+        # 创建文件处理器
+        self._create_file_handlers()
+
+        # 创建控制台处理器
+        if console_output:
+            self._create_console_handler()
+
+    def _set_formatter(self, log_format, datefmt):
+        """设置日志格式"""
+        if log_format is None:
+            log_format = 'P%(process)d.T%(thread)d | %(asctime)s | %(levelname)-8s | %(trace_id)-10s | %(log_type)-5s | %(message)s'
+
+        if datefmt is None:
+            datefmt = '%Y-%m-%d %H:%M:%S'
+
+        self.formatter = logging.Formatter(log_format, datefmt)
+
+    def _create_file_handlers(self):
+        """为每个日志级别创建文件处理器,每个文件包含该级别及更高级别的日志"""
+        level_files = {
+            logging.DEBUG: os.path.join(self.log_dir, "agent_debug.log"),
+            logging.INFO: os.path.join(self.log_dir, "agent_info.log"),
+            logging.WARNING: os.path.join(self.log_dir, "agent_warning.log"),
+            logging.ERROR: os.path.join(self.log_dir, "agent_error.log"),
+            logging.CRITICAL: os.path.join(self.log_dir, "agent_critical.log"),
+        }
+
+        for level, filename in level_files.items():
+            handler = RotatingFileHandler(
+                filename=filename,
+                mode='a',
+                maxBytes=self.file_max_bytes,
+                backupCount=self.backup_count,
+                encoding='utf-8'
+            )
+            handler.setLevel(level)  # 设置级别为对应文件级别
+            handler.setFormatter(self.formatter)
+            # 为每个级别的日志文件都添加一个筛选器,确保记录该级别及其更高级别
+            handler.addFilter(lambda record, lvl=level: record.levelno >= lvl)
+            self.addHandler(handler)
+
+    def _create_console_handler(self):
+        """创建控制台日志处理器"""
+        console_handler = logging.StreamHandler(sys.stdout)
+        console_handler.setLevel(logging.INFO)
+        console_handler.setFormatter(self.formatter)
+        self.addHandler(console_handler)
+
+    def _log_with_context(self, level, msg, trace_id, log_type, *args, **kwargs):
+        """统一的日志记录方法"""
+        extra = kwargs.get('extra', {})
+        extra.update({
+            'trace_id': trace_id,
+            'log_type': log_type
+        })
+        kwargs['extra'] = extra
+        super().log(level, msg, *args, **kwargs)
+    
+
+
+    def debug(self, msg, *args, trace_id="", log_type="system", **kwargs):
+        self._log_with_context(logging.DEBUG, msg, trace_id, log_type, *args, **kwargs)
+
+    def info(self, msg, *args, trace_id="", log_type="system", **kwargs):
+        self._log_with_context(logging.INFO, msg, trace_id, log_type, *args, **kwargs)
+
+    def warning(self, msg, *args, trace_id="", log_type="system", **kwargs):
+        self._log_with_context(logging.WARNING, msg, trace_id, log_type, *args, **kwargs)
+
+    def error(self, msg, *args, trace_id="", log_type="system", **kwargs):
+        self._log_with_context(logging.ERROR, msg, trace_id, log_type, *args, **kwargs)
+    
+    def exception(self, msg, *args, trace_id="", log_type="system", exc_info=True, **kwargs):
+        """记录异常信息,包含堆栈跟踪"""
+        extra = kwargs.get('extra', {})
+        extra.update({
+            'trace_id': trace_id,
+            'log_type': log_type
+        })
+        kwargs['extra'] = extra
+        kwargs['exc_info'] = exc_info  # 确保异常信息被记录
+        super().error(msg, *args, **kwargs)  # 使用 error 级别记录异常
+
+    def critical(self, msg, *args, trace_id="", log_type="system", **kwargs):
+        self._log_with_context(logging.CRITICAL, msg, trace_id, log_type, *args, **kwargs)
+
+
+server_logger = CompatibleLogger(
+    name="agent_log",
+    log_dir=config_handler.get("log", "LOG_FILE_PATH" , "logs"),
+    console_output=False if config_handler.get("log", "CONSOLE_OUTPUT" , "True").upper() == "FALSE" else True,
+    file_max_mb=int(config_handler.get("log", "LOG_FILE_MAX_MB", "10")),
+    backup_count=int(config_handler.get("log", "LOG_BACKUP_COUNT", "5"))
+)
+
+
+# 设置日志级别
+server_logger.info("logging initialized")

+ 179 - 0
requirements.txt

@@ -0,0 +1,179 @@
+aiohappyeyeballs==2.6.1
+aiohttp==3.12.13
+aiosignal==1.4.0
+annotated-types==0.7.0
+anyio==4.9.0
+async-timeout==5.0.1
+attrs==25.3.0
+Authlib==1.6.0
+backoff==2.2.1
+bcrypt==4.3.0
+build==1.2.2.post1
+cachetools==5.5.2
+certifi==2025.7.9
+cffi==1.17.1
+charset-normalizer==3.4.2
+chromadb==1.0.15
+click==8.2.1
+coloredlogs==15.0.1
+concurrent-log-handler==0.9.28
+cryptography==45.0.5
+cyclopts==3.22.2
+dashscope==1.23.8
+dataclasses-json==0.6.7
+distro==1.9.0
+dnspython==2.7.0
+docstring_parser==0.16
+docutils==0.21.2
+durationpy==0.10
+email_validator==2.2.0
+exceptiongroup==1.3.0
+fastapi==0.116.0
+fastmcp==2.10.4
+filelock==3.18.0
+flatbuffers==25.2.10
+frozenlist==1.7.0
+fsspec==2025.5.1
+google-auth==2.40.3
+googleapis-common-protos==1.70.0
+greenlet==3.2.3
+grpcio==1.67.1
+gunicorn==23.0.0
+h11==0.16.0
+hf-xet==1.1.5
+httpcore==1.0.9
+httptools==0.6.4
+httpx==0.28.1
+httpx-sse==0.4.1
+huggingface-hub==0.33.2
+humanfriendly==10.0
+idna==3.10
+importlib_metadata==8.7.0
+importlib_resources==6.5.2
+iniconfig==2.1.0
+jieba==0.42.1
+jiter==0.10.0
+joblib==1.5.1
+jsonpatch==1.33
+jsonpointer==3.0.0
+jsonschema==4.24.0
+jsonschema-specifications==2025.4.1
+jsonschema_pydantic==0.6
+kubernetes==33.1.0
+langchain==0.3.26
+langchain-chroma==0.2.4
+langchain-community==0.3.27
+langchain-core==0.3.68
+langchain-mcp-adapters==0.1.9
+langchain-mcp-tools==0.2.10
+langchain-milvus==0.2.1
+langchain-openai==0.3.27
+langchain-text-splitters==0.3.8
+langgraph==0.5.2
+langgraph-checkpoint==2.1.0
+langgraph-prebuilt==0.5.2
+langgraph-sdk==0.1.72
+langserve==0.3.1
+langsmith==0.4.4
+markdown-it-py==3.0.0
+marshmallow==3.26.1
+mcp==1.10.1
+mdurl==0.1.2
+milvus-lite==2.5.1
+mmh3==5.1.0
+mpmath==1.3.0
+multidict==6.6.3
+mypy_extensions==1.1.0
+mysql-connector-python==9.3.0
+nest-asyncio==1.6.0
+nltk==3.9.1
+numpy==2.3.1
+oauthlib==3.3.1
+onnxruntime==1.22.0
+openai==1.93.3
+openapi-pydantic==0.5.1
+opentelemetry-api==1.34.1
+opentelemetry-exporter-otlp-proto-common==1.34.1
+opentelemetry-exporter-otlp-proto-grpc==1.34.1
+opentelemetry-proto==1.34.1
+opentelemetry-sdk==1.34.1
+opentelemetry-semantic-conventions==0.55b1
+orjson==3.10.18
+ormsgpack==1.10.0
+overrides==7.7.0
+packaging==24.2
+pandas==2.3.1
+pluggy==1.6.0
+portalocker==3.2.0
+posthog==5.4.0
+propcache==0.3.2
+protobuf==5.29.5
+psutil==7.0.0
+pyasn1==0.6.1
+pyasn1_modules==0.4.2
+pybase64==1.4.1
+pycparser==2.22
+pydantic==2.11.7
+pydantic-settings==2.10.1
+pydantic_core==2.33.2
+Pygments==2.19.2
+PyJWT==2.8.0
+pymilvus==2.5.12
+PyMuPDF==1.26.3
+PyMySQL==1.1.1
+pyperclip==1.9.0
+PyPika==0.48.9
+pyproject_hooks==1.2.0
+pytest==8.4.1
+pytest-asyncio==1.0.0
+python-dateutil==2.9.0.post0
+python-dotenv==1.1.1
+python-multipart==0.0.20
+pytz==2025.2
+PyYAML==6.0.2
+referencing==0.36.2
+regex==2024.11.6
+requests==2.32.4
+requests-oauthlib==2.0.0
+requests-toolbelt==1.0.0
+rich==14.0.0
+rich-rst==1.3.1
+rpds-py==0.26.0
+rsa==4.9.1
+setuptools==78.1.1
+shellingham==1.5.4
+six==1.17.0
+sniffio==1.3.1
+SQLAlchemy==2.0.41
+sse-starlette==2.4.1
+starlette==0.46.2
+sympy==1.14.0
+tenacity==9.1.2
+tiktoken==0.9.0
+tokenizers==0.21.2
+tqdm==4.67.1
+typer==0.16.0
+typing-inspect==0.9.0
+typing-inspection==0.4.1
+typing_extensions==4.14.1
+tzdata==2025.2
+ujson==5.10.0
+urllib3==2.5.0
+uv==0.7.20
+uvicorn==0.35.0
+uvloop==0.21.0
+watchfiles==1.1.0
+websocket-client==1.8.0
+websockets==15.0.1
+wheel==0.45.1
+xinference-client==1.7.1.post1
+xxhash==3.5.0
+yarl==1.20.1
+zhipuai==2.1.5.20250708
+zipp==3.23.0
+zstandard==0.23.0
+aioredis==2.0.1
+redis==6.2.0
+langgraph-checkpoint-postgres==2.0.23
+langgraph-checkpoint-redis==0.0.8
+langchain-redis==0.2.3

+ 64 - 0
run.sh

@@ -0,0 +1,64 @@
+#!/bin/bash
+
+# 服务管理脚本
+APP_NAME="xiwu_agent_server"         # 自定义服务名称
+PID_FILE="./gunicorn_log/gunicorn.pid"          # PID 文件路径
+LOG_FILE="./gunicorn_log/gunicorn.log"          # 日志文件路径
+START_COMMAND="gunicorn -c gunicorn_config.py server.app:app"
+
+case "$1" in
+    start)
+        if [ -f "$PID_FILE" ]; then
+            if kill -0 $(cat "$PID_FILE") >/dev/null 2>&1; then
+                echo "✅ $APP_NAME 已在运行 (PID: $(cat $PID_FILE))"
+                exit 1
+            else
+                rm -f "$PID_FILE"
+            fi
+        fi
+
+        echo "🚀 启动 $APP_NAME..."
+        nohup $START_COMMAND >> "$LOG_FILE" 2>&1 &
+        echo $! > "$PID_FILE"
+        echo "🟢 启动成功! PID: $(cat $PID_FILE)"
+        echo "📝 日志输出: $LOG_FILE"
+        ;;
+
+    stop)
+        if [ ! -f "$PID_FILE" ]; then
+            echo "🔴 $APP_NAME 未运行"
+            exit 1
+        fi
+
+        PID=$(cat "$PID_FILE")
+        echo "🛑 停止 $APP_NAME (PID: $PID)..."
+        kill -TERM $PID
+        rm -f "$PID_FILE"
+        echo "⭕ 已停止"
+        ;;
+
+    restart)
+        $0 stop
+        sleep 2
+        $0 start
+        ;;
+
+    status)
+        if [ -f "$PID_FILE" ]; then
+            if kill -0 $(cat "$PID_FILE") >/dev/null 2>&1; then
+                echo "🟢 $APP_NAME 正在运行 (PID: $(cat $PID_FILE))"
+            else
+                echo "❌ PID 文件存在但进程未运行"
+                rm -f "$PID_FILE"
+            fi
+        else
+            echo "🔴 $APP_NAME 未运行"
+        fi
+        ;;
+
+    *)
+        echo "使用方法: $0 {start|stop|restart|status}"
+        exit 1
+esac
+
+exit 0

+ 97 - 0
schemas/__init__.py

@@ -0,0 +1,97 @@
+import re
+
+
+def is_number(character: str) -> bool:
+    """
+    Return True if *character* looks like a decimal number (optional sign,
+    optional fractional part); False for None/empty input.
+    """
+    return bool(re.match(r'^[-+]?\d+(\.\d+)?$', character)) if character else False
+
+
+
+def check_new_parameter(check_v, key, value):
+    """
+    Validate a single request parameter before processing.
+
+    :param check_v: list of [type, (limit_condition)]
+        type            -- the type ``value`` must have
+        limit_condition -- optional; a list means "value must be one of
+                           these", a tuple means "within this inclusive
+                           range" (length range for str, numeric range
+                           for int, allowed item values for list)
+    :param key: parameter name, used for logging / error messages
+    :param value: the actual value being checked
+    :return: None when valid, otherwise an error message string
+    Examples:
+      [str, (1, 64)]  -> str with 1 <= len <= 64
+      [list, [0, 1]]  -> list whose items are all in [0, 1]
+      [int, (1, 200)] -> int with 1 <= value <= 200
+    """
+    if check_v[0] == int:
+        # Accept numeric strings for int parameters by coercing first.
+        if is_number(str(value)):
+            value = int(value)
+    if not isinstance(value, check_v[0]):
+        return "type error, %s should be %s, but now is %s" % (key, str(check_v[0]), value)
+
+    if len(check_v) == 2:
+        if check_v[0] == str:
+            if isinstance(check_v[1], list):
+                if value not in check_v[1]:
+                    return "Invalid param, %s is %s now, not in %s" % (key, value, str(check_v[1]))
+            if isinstance(check_v[1], tuple):
+                if len(value) < check_v[1][0] or len(value) > check_v[1][1]:
+                    # BUG FIX: report the configured bounds; the old message
+                    # interpolated check_v[0] (the type) and check_v[1] (the tuple).
+                    return "Invalid param, %s is %s now, length is %s, range from %s to %s" % (
+                        key, value, len(value), check_v[1][0], check_v[1][1])
+        if check_v[0] == int:
+            if isinstance(check_v[1], list):
+                if value not in check_v[1]:
+                    return "Invalid param, %s is %s now, not in %s" % (key, value, str(check_v[1]))
+            if isinstance(check_v[1], tuple):
+                if value < check_v[1][0] or value > check_v[1][1]:
+                    # BUG FIX: same as above — show the numeric bounds.
+                    return "Invalid param, %s is %s now, range from %s to %s" % (
+                        key, value, check_v[1][0], check_v[1][1])
+        if check_v[0] == list:
+            if isinstance(check_v[1], list):
+                for v in value:
+                    if v not in check_v[1]:
+                        return "Invalid param, %s is %s now, not in %s" % (key, v, str(check_v[1]))
+
+
+class CheckParams:
+    """
+    Validate a request body against a parameter specification.
+
+    params_dict maps each parameter name to
+    [required_flag, expected_type, (optional limit)]; the tail of each
+    entry is handed to check_new_parameter().
+    """
+
+    def __init__(self, request_body, params_dict, logger, logger_name=None):
+        # raw request payload to validate
+        self.request_body = request_body
+        # validation spec: {param_name: [required, type, (limits)]}
+        self.params_dict = params_dict
+        self.logger_name = logger_name
+
+        # error envelope returned on validation failure
+        self.res = {
+            "code": 0,
+            "message": "ok",
+            "data": {"operate_id": ""}
+        }
+        self.logger = logger
+        # accumulates the validated parameters
+        self.body = dict()
+
+    def start(self):
+        """
+        Run the validation.
+
+        :return: (True, validated_body) on success,
+                 (False, error_response) on the first failure.
+        """
+        for k, v in self.params_dict.items():
+            if k not in self.request_body:
+                # v[0] is the "required" flag; missing optional params are skipped
+                if v[0] is True:
+                    message = f"Invalid Access, {self.logger_name} %s is not Found in request" % k
+                    self.logger.error(message)
+                    self.res["message"] = message
+                    self.res["code"] = 400
+                    return False, self.res
+                else:
+                    continue
+            check_result = check_new_parameter(v[1:], k, self.request_body[k])
+            if check_result is None:
+                self.body[k] = self.request_body[k]
+            else:
+                message = f"Invalid Access, {self.logger_name} " + check_result
+                self.logger.error(message)
+                self.res["message"] = message
+                self.res["code"] = 400
+                return False, self.res
+        return True, self.body

+ 27 - 0
schemas/cattle_farm.py

@@ -0,0 +1,27 @@
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :cattle_farm.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/11 12:41
+'''
+from typing import Optional
+
+from pydantic import BaseModel, constr, Field
+from enums.common_enums import BusinessSceneEnum, ErrorCodeEnum, UserRoleEnum
+
+
+class FarmConfig(BaseModel):
+    """Per-session request configuration: session id and caller role."""
+    sessionId: constr(max_length=128) =Field(description="会话id")
+    userRole: constr(max_length=15) =Field(default=UserRoleEnum.COMMON.code,description="用户角色")
+
+
+
+class CattleFarm(BaseModel):
+    """Request body for the cattle-farm agent endpoints."""
+    config: FarmConfig
+    # NOTE(review): typed Optional but has no default, so pydantic still
+    # treats it as required — confirm whether input may really be omitted.
+    input: Optional[str] = Field(description="用户输入")
+    # NOTE(review): default=None on a non-Optional str annotation — confirm
+    # this should be Optional[str].
+    businessScene: str = Field(default=None, description="业务场景")
+    context: Optional[str]  = Field(default=None, description="参考上下文")
+    supplementInfo: Optional[str] = Field(default=None, description="补充信息")

+ 46 - 0
server/app.py

@@ -0,0 +1,46 @@
+import os
+import sys
+
+
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+sys.path.insert(0, BASE_DIR)
+
+from views import lifespan
+
+from fastapi.middleware.cors import CORSMiddleware
+
+from fastapi import FastAPI
+
+from logger.loggering import server_logger
+from views.cattle_farm_views import cattle_router
+
+
+# Create the FastAPI application
+app = FastAPI(
+    title=" Agent API",
+    version="0.2",
+    description=" Agent+MCP API",
+    lifespan=lifespan
+)
+
+
+app.include_router(cattle_router)
+
+
+# Add the CORS middleware
+# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
+# rejected by browsers per the CORS spec — confirm the intended origins.
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],  # allow every origin
+    allow_credentials=True,
+    allow_methods=["*"],  # allowed HTTP methods
+    allow_headers=["*"],  # allowed request headers
+)
+
+
+server_logger.info(msg="APP init successfully")
+
+
+# Run a Uvicorn server when executed directly
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=8001)

+ 10 - 0
server/cus_middlewares.py

@@ -0,0 +1,10 @@
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :cus_middlewares.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/14 09:57
+'''
+

+ 54 - 0
test/test_redis.py

@@ -0,0 +1,54 @@
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :test.py.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/11 12:23
+'''
+
+import os
+import sys
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+
+import redis
+#from langchain.storage import RedisStore
+from langchain_community.storage import RedisStore
+import asyncio
+from redis import Redis as SyncRedisClient
+
+async def main():
+    """Round-trip a key through langchain RedisStore using the sync batch API."""
+    # Create a *sync* Redis client (not aioredis)
+    redis_client = redis.Redis.from_url(
+        "redis://localhost:6379",
+        decode_responses=False  # LangChain needs bytes
+    )
+    # redis_client = SyncRedisClient.from_url(
+    #     "redis://localhost:6379",
+    #     decode_responses=False  # LangChain需要bytes
+    # )
+    
+    # Create the store
+    store = RedisStore(client=redis_client)
+    
+    # Store data (note: the store API is batch-style, mset, not set)
+   
+   
+   # Store data: use mset (even for a single key)
+    store.mset([("test_key", b"test_value")])
+
+    # Fetch data: use mget
+    value = store.mget(["test_key"])
+    print(f"Retrieved: {value}")  # [b'test_value']
+
+    # Extract the first value if present
+    if value and value[0] is not None:
+        print(f"Value: {value[0].decode('utf-8')}")  # prints: Value: test_value
+
+
+    # Close the connection (optional)
+    redis_client.close()
+
+asyncio.run(main())

+ 48 - 0
test/test_redis2.py

@@ -0,0 +1,48 @@
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :test.py.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/11 12:23
+'''
+import os
+import sys
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+
+import redis
+#from langchain.storage import RedisStore
+from langchain_community.storage import RedisStore
+import asyncio
+from redis import Redis as SyncRedisClient
+
+async def main():
+    """Round-trip a key through langchain RedisStore using the async API."""
+    # Create a *sync* Redis client (not aioredis); RedisStore wraps it.
+    redis_client = redis.Redis.from_url(
+        "redis://localhost:6379",
+        decode_responses=False  # LangChain needs bytes
+    )
+    
+    # Create the store
+    store = RedisStore(client=redis_client)
+    # BUG FIX: RedisStore has no aset/aget/adelete (the old code raised
+    # AttributeError, as the original comment noted). The async API is the
+    # batch-style amset/amget/amdelete, mirroring mset/mget/mdelete.
+    await store.amset([("test_key", b"test_value")])
+    
+    # Async fetch (amget returns a list aligned with the requested keys)
+    values = await store.amget(["test_key"])
+    print(f"获取的值: {values[0] if values else None}")  # b'test_value'
+    
+    # Async delete
+    await store.amdelete(["test_key"])
+
+    # Close the connection (optional)
+    redis_client.close()
+
+asyncio.run(main())

+ 88 - 0
test/test_redis3.py

@@ -0,0 +1,88 @@
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :test.py.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/11 12:23
+'''
+
+import os
+import sys
+
+from sqlalchemy.orm import session
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+import asyncio
+from base.redis_connection import RedisConnectionFactory
+#from langchain_core.checkpoints import BaseCheckpointSaver
+
+async def main():
+    """Scratchpad: try several ways to wire Redis into langchain/langgraph."""
+    # Fetch the RedisStore directly
+    redis_store = await RedisConnectionFactory.get_redis_store()
+    await redis_store.set("some_key", "some_value")
+    data = await redis_store.get("some_key") 
+    print(data)
+    # 2. Create a checkpointer
+    # checkpointer = AsyncCheckpointSaver(store=redis_store)
+    # print(checkpointer)
+
+    # Fetch the RedisStore directly
+    #self.redis_store = await RedisConnectionFactory.get_langchain_redis_store()
+    from langgraph.checkpoint.redis import RedisSaver
+    from redis import Redis
+    import redis     
+
+    # Step 1: connect to Redis
+    redis_client = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
+    checkpointer = RedisSaver(redis_client=redis_client)
+    print(f"checkpointer={checkpointer}")
+
+    #from langchain.storage import RedisStore
+    #from langchain_community.storage import RedisStore
+    #redis_store = RedisStore.from_client(redis_client) 
+    #AttributeError: type object 'RedisStore' has no attribute 'from_client'
+    # error: UnboundLocalError: cannot access local variable 'RedisStore' where it is not associated with a value
+    #print(f"redis_store={redis_store}")
+
+
+    #from langchain_community.checkpoint import RedisCheckpointer
+    #from langchain.agents import RedisCheckpointer
+    #from langchain.memory import RedisCheckpointer
+    #checkpointer2 = RedisCheckpointer(redis_store)
+    #print(f"checkpointer2={checkpointer2}")
+    
+
+    #from langchain.storage import RedisStore # deprecated
+    from langchain_community.storage import RedisStore  
+    from langchain.memory import ConversationBufferMemory
+    from langchain_community.chat_message_histories import RedisChatMessageHistory
+    session_id = "session_id"
+    # Initialise the Redis storage
+    redis_url = "redis://localhost:6379/0"
+    #redis_store = RedisStore.from_url(redis_url)
+
+    redis_client = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
+    redis_store = RedisStore(client=redis_client)
+
+
+    # Use RedisChatMessageHistory to persist the conversation history
+    chat_history = RedisChatMessageHistory(
+        session_id=session_id,  # unique session identifier
+        url=redis_url  # or pass redis_client directly
+    )
+    # Back the conversation memory with Redis
+    memory = ConversationBufferMemory(
+        memory_key="chat_history",
+        return_messages=True,
+        chat_memory=chat_history  # or any compatible store
+    )
+    print(f"memory={memory}")
+
+
+    # redis_client = Redis.from_url("redis://localhost:6379")
+    # checkpointer = RedisSaver(connection=redis_client)
+    # checkpointer.setup()
+
+asyncio.run(main())

+ 128 - 0
test/test_utils.py

@@ -0,0 +1,128 @@
+
+
+import os
+import sys
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+from datetime import datetime
+from typing import List, Dict, Optional
+
+from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
+from langchain_openai import ChatOpenAI
+
+from test_config import config_handler
+from logger.loggering import server_logger
+
+
+def get_models():
+    """
+    Load the model bundle; the provider defaults to deepseek or qwen.
+
+    :return: (llm, chat, embed) from the provider selected by the
+             [model] MODEL_TYPE config entry (QWEN -> qwen, else deepseek).
+    """
+    model_type = config_handler.get("model", "MODEL_TYPE")
+    server_logger.info(f"get_models -> model_type:{model_type}")
+    if model_type.upper() == "QWEN":
+        return get_deploy_qwen_models()
+    return get_deepseek_models()
+
+
+def get_deepseek_models():
+    """
+    Load the DeepSeek chat models.
+
+    :return: (llm, chat, embed) — embed is always None for DeepSeek.
+    :raises Exception: when any required config entry is missing.
+    """
+    deepseek_model_server_url = config_handler.get("deepseek", "DEEPSEEK_SERVER_URL")
+    deepseek_chat_model_id = config_handler.get("deepseek", "DEEPSEEK_MODEL_ID")
+    deepseek_api_key = config_handler.get("deepseek", "DEEPSEEK_API_KEY")
+    # NOTE(review): this logs the API key in clear text — consider masking it.
+    server_logger.info(f"get_deepseek_models -> chat_model_id:{deepseek_chat_model_id},api_key:{deepseek_api_key}")
+    if deepseek_model_server_url is None or deepseek_chat_model_id is None or deepseek_api_key is None:
+        server_logger.error("请设置环境变量: DEEPSEEK_SERVER_URL, DEEPSEEK_MODEL_ID, DEEPSEEK_API_KEY")
+        raise Exception("设置环境变量: DEEPSEEK_SERVER_URL, DEEPSEEK_MODEL_ID, DEEPSEEK_API_KEY")
+    # llm model (top_p=0.7)
+    llm = ChatOpenAI(base_url=deepseek_model_server_url,
+                     api_key=deepseek_api_key,
+                     model=deepseek_chat_model_id,
+                     max_tokens=4096,
+                     temperature=0.3,
+                     top_p=0.7,
+                     extra_body={
+                         "enable_thinking": False  # added to avoid provider errors
+                     })
+    # chat model (top_p=0.2)
+    chat = ChatOpenAI(base_url=deepseek_model_server_url,
+                      api_key=deepseek_api_key,
+                      model=deepseek_chat_model_id,
+                      max_tokens=4096,
+                      temperature=0.3,
+                      top_p=0.2,
+                      extra_body={
+                          "enable_thinking": False  # added to avoid provider errors
+                      })
+    embed = None
+    return llm, chat, embed
+
+
+# Load the qwen model family
+def get_deploy_qwen_models():
+    """
+    Load the Qwen-family models (ModelScope online Qwen3 API service).
+
+    :return: (llm, chat, embed) — embed is currently None (disabled).
+    :raises Exception: when a required config entry is missing.
+    """
+    model_server_url = config_handler.get("qwen", "MODEL_SERVER_URL")
+    chat_model_id = config_handler.get("qwen", "CHAT_MODEL_ID")
+    api_key = config_handler.get("qwen", "API_KEY")
+    embedding_model_id = config_handler.get("qwen", "EMBED_MODEL_ID")
+    # temperature = os.getenv("CHAT_MODEL_TEMPERATURE")
+    # NOTE(review): this logs the API key in clear text — consider masking it.
+    server_logger.info(
+        f"get_qwen_chat_model -> chat_model_id:{chat_model_id},api_key:{api_key},embedding_model_id:{embedding_model_id}")
+    if model_server_url is None or chat_model_id is None or api_key is None:
+        server_logger.error("请设置环境变量: MODEL_SERVER_URL, CHAT_MODEL_ID, API_KEY")
+        raise Exception("请设置环境变量: MODEL_SERVER_URL, CHAT_MODEL_ID, API_KEY")
+
+    # llm model (temperature=0.5, top_p=0.7)
+    llm = ChatOpenAI(base_url=model_server_url,
+                     api_key=api_key,
+                     model=chat_model_id,
+                     max_tokens=1024,
+                     temperature=0.5,
+                     top_p=0.7,
+                     extra_body={
+                         "enable_thinking": False  # added to avoid provider errors
+                     })
+    # chat model (near-deterministic: temperature=0.01, top_p=0.2)
+    chat = ChatOpenAI(base_url=model_server_url,
+                      api_key=api_key,
+                      model=chat_model_id,
+                      max_tokens=1024,
+                      temperature=0.01,
+                      top_p=0.2,
+                      extra_body={
+                          "enable_thinking": False  # added to avoid provider errors
+                      })
+
+    # embedding model: text-embedding-v3 / text-embedding-v4
+    # from langchain_community.embeddings import DashScopeEmbeddings
+    embed = None  # DashScopeEmbeddings(model=embedding_model_id)
+    return llm, chat, embed
+
+
+def test_qwen_chat_model():
+    """Smoke-test the qwen chat model with a single greeting prompt."""
+    # Load the models
+    llm, chat, embed = get_deploy_qwen_models()
+    example_query = "你好,你是谁?"
+    result = llm.invoke(input=example_query)
+    server_logger.info(f"result={result}")
+    print(f"result={result}")
+
+
+def test_deepseek_chat_model():
+    """Smoke-test the deepseek chat model with a single greeting prompt."""
+    # Load the models
+    llm, chat, embed = get_deepseek_models()
+    example_query = "你好,你是谁?"
+    result = llm.invoke(input=example_query)
+    server_logger.info(f"result={result}")
+    print(f"result={result}")
+
+
+
+
+if __name__ == "__main__":
+    test_qwen_chat_model()  # run
+    #test_deepseek_chat_model()

+ 32 - 0
test/test_yaml.py

@@ -0,0 +1,32 @@
+
+
+
+import os
+import sys
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+import yaml
+from logger.loggering import server_logger
+from utils.yaml_utils import system_prompt_config , get_business_scene_prompt
+
+
+# Test reading the yaml config files
+def test_get_system_prompt():
+    """Log the system prompt template loaded from yaml config."""
+    system_prompt = system_prompt_config
+    # BUG FIX: the original reused the outer double quote inside the f-string
+    # (system_prompt["template"]), which is a SyntaxError before Python 3.12;
+    # use single quotes for the subscript.
+    server_logger.info(f"获取系统提示语,template: {system_prompt['template']}")
+
+
+# Fetch the task prompt for a business scene
+def test_get_task_prompt():
+    #business_scene = "common_model_query"
+    #business_scene = "cattle_farm_common"
+    #business_scene = "cattle_farm_query"
+    business_scene = "cattle_farm_warning_plan"
+    #business_scene = "cattle_farm_warning_task_execute"
+    business_scene_enum , task_prompt_config = get_business_scene_prompt(business_scene)
+    # BUG FIX: same nested-quote issue as above in both log lines.
+    server_logger.info(f"获取系统提示语,business_scene_enum:{business_scene_enum},task_prompt: {task_prompt_config['task_prompt']}")
+    server_logger.info(f"获取系统提示语,business_scene_enum:{business_scene_enum},template: {task_prompt_config['template']}")
+
+
+if __name__ == '__main__':
+    #test_get_system_prompt() # get the system prompt
+    test_get_task_prompt() # fetch the task prompt

+ 76 - 0
test/问题/流式推理打印推理过程.txt

@@ -0,0 +1,76 @@
+怎么通过如下代码 打印agent整个推理执行过程,if 'messages' in event and event['messages']: 这个条件不满足,
+导致无法打印  event["messages"][-1].pretty_print()
+ # 流式执行
+                events = self.agent_executor.astream_events(
+                    {"messages": all_messages},
+                    config=config,
+                    stream_mode="values"
+                )
+
+                full_response = []
+                buffer = []
+                last_flush_time = time.time()
+
+                # 流式处理事件
+                async for event in events:
+                        server_logger.info(trace_id=trace_id , msg=f"Event: {event}")
+                        if 'messages' in event and event['messages']:
+                            event["messages"][-1].pretty_print()
+                   
+                    if 'chunk' in event['data'] and "on_chat_model_stream" in event['event']:
+                        chunk = event['data']['chunk'].content
+                        full_response.append(chunk)
+
+                        # 缓冲管理策略
+                        buffer.append(chunk)
+                        current_time = time.time()
+
+                        # 满足以下任一条件即刷新缓冲区
+                        if (len(buffer) >= 3 or  # 达到最小块数
+                                (current_time - last_flush_time) > 0.5 or  # 超时
+                                any(chunk.endswith((c, f"{c} ")) for c in
+                                    ['.', '。', '!', '?', '\n', ';', ';'])):  # 自然断点
+
+                            # 合并并发送缓冲内容
+                            combined = ''.join(buffer)
+                            yield combined
+
+                            # 重置缓冲
+                            buffer.clear()
+                            last_flush_time = current_time
+
+P31216.T140704636194752 | 2025-08-26 09:58:09 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | REQUEST | 开始请求: POST http://localhost:8001/queryex/stream, start_time=1756173489.7427042
+P31216.T140704636194752 | 2025-08-26 09:58:09 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | system | config=FarmConfig(sessionId='D00032') input='查询10号信息' businessScene=None context=None supplementInfo=None
+P31216.T140704636194752 | 2025-08-26 09:58:09 | INFO     |            | system | [使用用户最新历史记录作为意图识别]use_history_recognize_intent: True
+P31216.T140704636194752 | 2025-08-26 09:58:09 | INFO     |            | system | get_models -> model_type:deepseek
+P31216.T140704636194752 | 2025-08-26 09:58:09 | INFO     |            | system | get_deepseek_models -> chat_model_id:deepseek-chat,api_key:sk-479e4ea23d8e42bfb982a54137094a7b
+P31216.T140704636194752 | 2025-08-26 09:58:09 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | system | redis 内存上下文历史初始完成=chat_memory=<langchain_community.chat_message_histories.redis.RedisChatMessageHistory object at 0x110ac96a0> return_messages=True memory_key='chat_history'
+P31216.T140704636194752 | 2025-08-26 09:58:09 | INFO     |            | system | 增加用户历史记录,用于意图识别,prompt配置.system_prompt: 基于提供的样例,结合用户最近的对话历史上下文进行意图识别,精准匹配对应的业务场景指令。
+必须优先参考最近的上下文语义及用户意图演变,若问题与样例中的任一业务场景相符,则返回对应指令;若无法匹配任何已定义场景,则返回 cattle_farm_common。
+严格遵守:仅输出指令字符串,不附加任何解释、说明或格式。
+用户目前历史上下文信息:
+无
+
+P31216.T140704636194752 | 2025-08-26 09:58:13 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | system | 使用意图识别:business_scene=cattle_farm_query
+P31216.T140704636194752 | 2025-08-26 09:58:13 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | system | business_scene_enum:cattle_farm_query Get prompt successfully.
+P31216.T140704636194752 | 2025-08-26 09:58:13 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | queryex | session_id:D00032, business_scene:cattle_farm_query,final_result_data_type:Markdown ,input_data: 查询10号信息
+P31216.T140704636194752 | 2025-08-26 09:58:16 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | system | 固定问题意图识别系统: input=查询10号信息, result=question_nine
+P31216.T140704636194752 | 2025-08-26 09:58:16 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | system | 配置固定问题意图识别结果列表: recognize_intent_out_list=['question_one', 'question_two', 'question_three', 'question_four', 'question_five', 'question_six', 'question_seven', 'question_eight']
+P31216.T140704636194752 | 2025-08-26 09:58:16 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | system | fixed_flag=False,answer_result=查询10号信息
+P31216.T140704636194752 | 2025-08-26 09:58:16 | INFO     | 6344dda5-19f5-4e46-94b9-9a349ad95db3 | system | config=FarmConfig(sessionId='D00032') input='查询10号信息' businessScene=None context=None supplementInfo=None
+P31216.T140704636194752 | 2025-08-26 09:58:16 | INFO     |            | system | 系统提示词 system_prompt:家于一身的AI助手"",专注于为中提供全方位的智能化指导。
+      你的建议要务实、经济、易操作,并能基于物联网数据提供精准预警和具体解决方案。
+          1. 输出要求:
+            - 默认以Markdown文本格式输出
+            - 如果明确返回json格式,请严格规范的json格式(不包含任何额外说明文字)
+          2. 内容规范:
+            - 禁止包含任何敏感信息(密钥/Token/API等)
+          3. 解析数据规则
+            - 参数说明:返回数据类型 data_type 默认为 text
+            - 工具返回的json结构,其中data中的是需要数据字段,code是错误码,msg是错误信息
+          4. json响应模版:
+              {{
+                 "字段1": "值1",
+                 "字段2": "值2",
+                 "字段3": "值3"
+               }}

+ 76 - 0
utils/common.py

@@ -0,0 +1,76 @@
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :common.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/11 11:36
+'''
+import time
+import uuid
+from functools import wraps
+
+
+def return_json(code=0, msg='ok', business_scene=None, data=None, trace_id=None, data_type="text", page=0, page_size=10, *args, **kwargs):
+    """
+    Build the standard JSON response envelope.
+
+    :param code: business status code, 0 = ok
+    :param msg: human-readable message
+    :param business_scene: business scene identifier
+    :param data: payload; when truthy, a 'dataType' key is injected
+    :param trace_id: request trace id; when omitted a fresh uuid4 is
+                     generated per call (backward-compatible default)
+    :param data_type: value stored under data['dataType'] (default "text")
+    :return: response dict
+    """
+    # BUG FIX: the old default trace_id=str(uuid.uuid4()) was evaluated ONCE
+    # at import time, so every call without an explicit trace_id shared the
+    # same id, defeating tracing; generate a new one per call instead.
+    if trace_id is None:
+        trace_id = str(uuid.uuid4())
+    res = {
+        "code": code,
+        "message": msg,
+        "page": page,
+        "page_size": page_size,
+        "trace_id": trace_id,
+        "business_scene": business_scene,
+    }
+    if data:
+        # NOTE(review): data is indexed as a dict below, yet `data += args`
+        # would fail on a dict — confirm what type callers actually pass.
+        if args:
+            data += args
+        data['dataType'] = data_type
+    res['data'] = data
+
+    if kwargs:
+        res.update(kwargs)
+    return res
+
+
+def calcu_run_time(logger, name: str):
+    """
+    Decorator factory that logs a function's wall-clock execution time.
+
+    :param logger: log obj
+    :param name: log-name prefix used in the start/end entries
+    :return: the decorator
+    """
+
+    def inner_fuc(func):
+        @wraps(func)
+        def calcu_wrapper(*args, **kwargs):
+            # timestamps truncated to milliseconds for readable logs
+            start_time = float("%.3f" % time.time())
+            logger.info(f"{name}_start_time: {start_time}")
+            result = func(*args, **kwargs)
+            end_time = float("%.3f" % time.time())
+            logger.info(f"{name}_end_time: {end_time}")
+            logger.info("request_total_cost_time: {}".format(end_time - start_time))
+            return result
+
+        return calcu_wrapper
+
+    return inner_fuc
+
+
+
+def handler_err(logger, err, trace_id: str="", err_name: str=""):
+    """
+    Log an exception in a structured way: the error itself, the file it
+    originated from, the line number, and the full stack trace.
+
+    :param logger: log obj
+    :param err: error obj
+    :param trace_id: trace id; a fresh uuid is generated when empty, default=""
+    :param err_name: error name used as log_type, default=""
+    """
+    trace_id = trace_id if trace_id else f"{uuid.uuid4()}"
+    logger.error(trace_id=trace_id, log_type=err_name, msg=f'error file: {err}')
+    logger.error(trace_id=trace_id, log_type=err_name, msg=f'data error file: {err.__traceback__.tb_frame.f_globals["__file__"]}')
+    logger.error(trace_id=trace_id, log_type=err_name, msg=f"data error line: {err.__traceback__.tb_lineno}")
+    logger.exception(trace_id=trace_id, log_type=err_name, msg=f"Error Stack trace:")
+

+ 63 - 0
utils/redis_utils.py

@@ -0,0 +1,63 @@
+
+import json
+from logger.loggering import server_logger
+from base.redis_connection import RedisConnectionFactory
+from base.config import config_handler
+# 缓存数据有效期 默认 3 分钟
+CACHE_DATA_EXPIRED_TIME = 3 * 60
+
+
+
+
+
+
+async def set_redis_result_cache_data(data_type: str , trace_id: str, value: str):
+    """
+    Store a result payload in the redis cache under "{trace_id}:{data_type}".
+
+    :param data_type: data kind — cattle_info (basic info),
+                      cattle_temperature (body temperature) or
+                      cattle_walk (step count)
+    :param trace_id: request trace id (part of the cache key)
+    :param value: payload to cache (expires after CACHE_DATA_EXPIRED_TIME)
+    """
+    expired_time = config_handler.get("api", "CACHE_DATA_EXPIRED_TIME" , CACHE_DATA_EXPIRED_TIME)
+    key = f"{trace_id}:{data_type}"
+     # fetch the shared RedisStore directly
+    redis_store = await RedisConnectionFactory.get_redis_store()
+    await redis_store.set(key, value , ex=expired_time) 
+
+
+
+
+async def get_redis_result_cache_data(data_type: str , trace_id: str):
+    """
+    Fetch a cached result payload from redis.
+
+    :param data_type: data kind — cattle_info (basic info),
+                      cattle_temperature (body temperature) or
+                      cattle_walk (step count)
+    :param trace_id: request trace id (part of the cache key)
+    :return: the raw cached value, or None when the key is absent
+    """
+    key = f"{trace_id}:{data_type}"
+     # fetch the shared RedisStore directly
+    redis_store = await RedisConnectionFactory.get_redis_store()
+    value = await redis_store.get(key) 
+    return value
+
+
+
+async def get_redis_result_cache_data_and_delete_key(data_type: str , trace_id: str):
+    """
+    Fetch a cached result from redis and parse it as JSON.
+
+    NOTE(review): despite the name, the key deletion below is commented out,
+    so the entry stays in redis until it expires — confirm intent.
+
+    :param data_type: data kind — cattle_info (basic info),
+                      cattle_temperature (body temperature) or
+                      cattle_walk (step count)
+    :param trace_id: request trace id (part of the cache key)
+    :return: parsed JSON object, or None when the key is absent
+    """
+    key = f"{trace_id}:{data_type}"
+     # fetch the shared RedisStore directly
+    redis_store = await RedisConnectionFactory.get_redis_store()
+    value = await redis_store.get(key) 
+    server_logger.info(f"获取redis结果缓存数据: {key}-{value}")
+    if value is None:
+        return None
+    # step 1: decode raw bytes to str (assumes bytes value — TODO confirm)
+    json_str = value.decode('utf-8')
+    # step 2: parse the JSON
+    data = json.loads(json_str)
+    # delete the key
+    #await redis_store.delete(key)
+    return data

+ 114 - 0
utils/request_tool.py

@@ -0,0 +1,114 @@
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :request_tool.py
+@IDE        :Cursor
+@Author     : 
+@Date       :2025/08/21
+'''
+import json
+import requests
+import time
+from requests.exceptions import RequestException
+from base.config import config_handler
+from logger.loggering import server_logger
+from utils.common import handler_err
+
+
+class KnowledgeDifyAction:
+    """
+    HTTP client for the Dify knowledge-base retrieval API, with token
+    authentication and optional session keep-alive.
+    """
+
+    def __init__(self, token=None, use_session=True):
+        """
+        Initialise the client from the [knowledge_dify] config section.
+
+        :param token: auth token (NOTE(review): currently ignored — the
+                      configured DIFY_API_KEY is always used; confirm intent)
+        :param use_session: keep one requests.Session alive (default True)
+        """
+        self.dify_server_url = config_handler.get("knowledge_dify","DIFY_SERVER_URL")
+        self.dify_api_key = config_handler.get("knowledge_dify","DIFY_API_KEY")
+        self.dify_dataset_id_list_str = config_handler.get("knowledge_dify","DIFY_DATASET_ID_LIST")
+        # comma-separated dataset ids -> list
+        self.dify_dataset_id_list = self.dify_dataset_id_list_str.split(",")
+        self.token = self.dify_api_key
+        self.use_session = use_session
+
+        # Session keep-alive: attach the bearer token once on the session.
+        if self.use_session:
+            self.session = requests.Session()
+            if self.token:
+                self.session.headers.update({"Authorization": f"Bearer {self.token}"})
+
+    def get_request_knowledge_retrieve_list(self , params=None, headers=None, timeout=20, trace_id=""):
+        """
+        Batch retrieval: query every configured Dify dataset and merge results.
+
+        :param params: request payload (optional)
+        :param headers: extra request headers (optional)
+        :param timeout: timeout in seconds (default 20)
+        :param trace_id: trace id for logging
+        :return: flat list of all retrieved records (failed datasets skipped)
+        """
+        server_logger.info(trace_id=trace_id, msg=f"开始执行 dify 批量知识库检索,dify_dataset_id_list:{self.dify_dataset_id_list}")
+        all_record_list = []
+        for dataset_id in self.dify_dataset_id_list:
+            response = self.get_request_knowledge_retrieve(trace_id=trace_id ,dataset_id=dataset_id , params=params , headers=headers , timeout=timeout)
+            # An error payload carries a "code" field — log it and skip.
+            if "code" in response:
+                # BUG FIX: the old f-string nested braces and reused double
+                # quotes ({response['status']-{response["code"]}-...}), which
+                # is a SyntaxError before Python 3.12 and a TypeError
+                # (dict minus set) at runtime; format the fields plainly.
+                error_msg = (f"检索知识库内容失败,详细信息:{response.get('status')}"
+                             f"-{response.get('code')}-{response.get('message')}")
+                server_logger.info(f"dataset_id:{dataset_id}, {error_msg}", trace_id=trace_id)
+                continue
+            # Merge every dataset's records into one flat list
+            # (tolerate a success payload without "records").
+            all_record_list += response.get("records", [])
+
+        return all_record_list
+
+    def get_request_knowledge_retrieve(self , dataset_id , params=None, headers=None, timeout=20, trace_id=""):
+        """
+        Single-dataset Dify retrieval — POST the query to the dataset URL.
+
+        :param dataset_id: id substituted into the DIFY_DATASET_URL template
+        :param params: request payload (optional)
+        :param headers: extra request headers (optional)
+        :param timeout: timeout in seconds (default 20)
+        :param trace_id: trace id for logging
+        :return: parsed JSON response dict, or {} on request failure
+        """
+        dify_dataset_url = config_handler.get("knowledge_dify","DIFY_DATASET_URL")
+        # Substitute the dataset id into the configured URL template.
+        dify_dataset_url = dify_dataset_url.format(dataset_id=dataset_id)
+        # Build the request headers.
+        request_headers = {"Content-Type": "application/json"}
+        if self.token:  # non-session mode needs the token per request
+            request_headers["Authorization"] = f"Bearer {self.token}"
+        if headers:  # merge extra headers
+            request_headers.update(headers)
+        request_url = "{dify_server_url}{dify_dataset_url}".format(
+            dify_server_url=self.dify_server_url, dify_dataset_url=dify_dataset_url)
+        server_logger.info(f"{request_url}, header: {request_headers}, params={params}", trace_id=trace_id)
+
+        try:
+            response = requests.post(
+                                request_url,
+                                json=params,
+                                headers=request_headers,
+                                timeout=timeout
+                            )
+            response_data = response.json()
+            server_logger.info(f"url: {request_url}, dataset_id:{dataset_id}, response: {response_data}", trace_id=trace_id)
+            return response_data
+        except RequestException as e:
+            handler_err(server_logger, trace_id=trace_id, err=e, err_name='get_request_knowledge_retrieve')
+            #raise e
+        return {}

+ 116 - 0
utils/tool_utils.py

@@ -0,0 +1,116 @@
+import time
+from math import log
+import os
+from dotenv import load_dotenv
+from enums.common_enums import BusinessSceneEnum, ErrorCodeEnum, UserRoleEnum
+from functools import wraps
+
+from logger.loggering import server_logger
+from utils.common import handler_err
+from base.config import config_handler
+
+# 获取当前文件的目录
+current_dir = os.path.dirname(__file__)
+# 构建到 .env 的相对路径
+conf_file_path = os.path.join(current_dir , '../',  '.env')
+#server_logger.info(f"当前目录: {conf_file_path}")
+# 加载环境变量
+load_dotenv(dotenv_path=conf_file_path)
+
def verify_param(param: dict):
    """
    Validate chat request parameters.

    :param param: request dict; expects "input", "config.session_id" and,
                  unless the default common-model query is enabled,
                  a valid "businessScene"
    :raises ValueError: when a required field is missing or invalid
    """
    input_data = param.get("input")
    # Guard against a missing "config" section instead of raising AttributeError
    config = param.get("config") or {}
    session_id = config.get("session_id")
    businessScene = param.get("businessScene")
    if input_data is None:
        # str(...) produces the enum's message; the original passed the
        # unbound __str__ method object instead of calling it
        raise ValueError(str(ErrorCodeEnum.INPUT_INFO_EMPTY))
    if session_id is None:
        raise ValueError(str(ErrorCodeEnum.SESSION_ID_EMPTY))
    # Whether the default common-model query may be used (default False).
    # Environment variables are strings, so parse truthy values explicitly:
    # the old plain truthiness test treated "false"/"0" as enabled.
    use_default_common_model_query = str(
        os.environ.get("USE_DEFAULT_COMMON_MODEL_QUERY", "false")
    ).strip().lower() in ("1", "true", "yes", "on")
    server_logger.info(f"使用可默认的通用模型查询: {use_default_common_model_query}")

    if not use_default_common_model_query:
        if businessScene is None:
            raise ValueError(str(ErrorCodeEnum.BUSINSESS_SCENE_EMPTY))
        if not BusinessSceneEnum.get_item_by_code(param.get('businessScene')):
            raise ValueError(str(ErrorCodeEnum.BUSINSESS_SCENE_ERROR))
+
+
+
def get_system_prompt() -> str:
    """Read the system prompt from the [system] configuration section."""
    prompt = config_handler.get("system", "SYSTEM_PROMPT")
    server_logger.info(f"获取系统提示语: {prompt}")
    return str(prompt)
+
+
+
def get_business_scene_prompt(business_scene):
    """
    Resolve the prompt file for a business scene and load its text.

    :param business_scene: scene code, or None for the default common query
    :return: (BusinessSceneEnum, prompt text) tuple
    :raises ValueError: unknown scene code or missing prompt file
    """
    # Default to the common model-query prompt
    business_scene_enum = BusinessSceneEnum.COMMON_MODEL_QUERY
    prompt_file = business_scene_enum.prompt_file
    if business_scene is not None:
        business_scene_enum = BusinessSceneEnum.get_item_by_code(business_scene)
        if not business_scene_enum:
            raise ValueError("未找到枚举值")
        if business_scene_enum.prompt_file is None:
            raise ValueError("业务场景不存在")
        prompt_file = business_scene_enum.prompt_file

    prompt_file = os.path.join(current_dir, '../', 'config', 'prompt', prompt_file)
    server_logger.info(f"获取业务场景提示语: {prompt_file}")
    if not os.path.exists(prompt_file):
        raise ValueError("业务场景不存在")

    try:
        with open(prompt_file, 'r', encoding='utf-8') as f:
            # f.read() returns the file verbatim; the previous
            # '\n'.join(f.readlines()) doubled every line break because
            # readlines() already keeps the trailing newlines
            return business_scene_enum, f.read()
    except Exception as e:
        handler_err(server_logger, e, err_name="get_business_scene_prompt")
        server_logger.error(f"获取业务场景提示语失败: {e}")
        raise e
+
+
def get_fixed_problem_answer_txt_content(file_name: str):
    """Load the canned answer text for a fixed question from config/fixed_answer."""
    answer_txt_file = os.path.join(current_dir, '../', 'config', 'fixed_answer', file_name + ".txt")
    server_logger.info(f"固定回答文本内容: {answer_txt_file}")
    if not os.path.exists(answer_txt_file):
        raise ValueError("固定回答文本不存在")

    try:
        # Reading the whole file is equivalent to "".join(f.readlines())
        with open(answer_txt_file, 'r', encoding='utf-8') as f:
            return f.read()
    except Exception as e:
        handler_err(server_logger, e, err_name="get_fixed_problem_answer_txt_content")
        server_logger.error(f"获取固定回答文本失败: {e}")
        raise e
+
+
+
+
def verify_user_role(user_role: str):
    """
    Verify the user role.
        common user ("common"): may NOT query the knowledge base
        tenant user ("tenant"): only tenants may query the knowledge base
    """
    return user_role == UserRoleEnum.TENANT.code

+ 205 - 0
utils/utils.py

@@ -0,0 +1,205 @@
+import json
+import time
+import uuid
+from datetime import datetime
+from typing import List, Dict, Optional
+
+from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
+from langchain_openai import ChatOpenAI
+
+from base.config import config_handler
+from logger.loggering import server_logger
+
+
def get_models():
    """
    Load the configured model family; defaults to DeepSeek, "qwen" selects Qwen.
    """
    model_type = config_handler.get("model", "MODEL_TYPE")
    server_logger.info(f"get_models -> model_type:{model_type}")
    loader = get_deploy_qwen_models if model_type.upper() == "QWEN" else get_deepseek_models
    return loader()
+
+
def get_deepseek_models():
    """
    Build the DeepSeek chat models from configuration.

    :return: (llm, chat, embed) tuple; embed is always None for DeepSeek
    :raises Exception: when any required configuration entry is missing
    """
    base_url = config_handler.get("deepseek", "DEEPSEEK_SERVER_URL")
    model_id = config_handler.get("deepseek", "DEEPSEEK_MODEL_ID")
    api_key = config_handler.get("deepseek", "DEEPSEEK_API_KEY")
    server_logger.info(f"get_deepseek_models -> chat_model_id:{model_id},api_key:{api_key}")
    if base_url is None or model_id is None or api_key is None:
        server_logger.error("请设置环境变量: DEEPSEEK_SERVER_URL, DEEPSEEK_MODEL_ID, DEEPSEEK_API_KEY")
        raise Exception("设置环境变量: DEEPSEEK_SERVER_URL, DEEPSEEK_MODEL_ID, DEEPSEEK_API_KEY")

    shared_kwargs = dict(
        base_url=base_url,
        api_key=api_key,
        model=model_id,
        max_tokens=4096,
        temperature=0.3,
        extra_body={"enable_thinking": False},  # avoids a server-side error
    )
    # llm: more diverse sampling; chat: tighter nucleus sampling
    llm = ChatOpenAI(top_p=0.7, **shared_kwargs)
    chat = ChatOpenAI(top_p=0.2, **shared_kwargs)
    embed = None
    return llm, chat, embed
+
+
+# 获取千问模型
def get_deploy_qwen_models():
    """
    Load the Qwen model family (ModelScope online Qwen3 API service).

    :return: (llm, chat, embed) tuple; embed is currently always None
    :raises Exception: when any required configuration entry is missing
    """
    base_url = config_handler.get("qwen", "MODEL_SERVER_URL")
    chat_model_id = config_handler.get("qwen", "CHAT_MODEL_ID")
    api_key = config_handler.get("qwen", "API_KEY")
    embedding_model_id = config_handler.get("qwen", "EMBED_MODEL_ID")
    server_logger.info(
        f"get_qwen_chat_model -> chat_model_id:{chat_model_id},api_key:{api_key},embedding_model_id:{embedding_model_id}")
    if base_url is None or chat_model_id is None or api_key is None:
        server_logger.error("请设置环境变量: MODEL_SERVER_URL, CHAT_MODEL_ID, API_KEY")
        raise Exception("请设置环境变量: MODEL_SERVER_URL, CHAT_MODEL_ID, API_KEY")

    shared_kwargs = dict(
        base_url=base_url,
        api_key=api_key,
        model=chat_model_id,
        max_tokens=1024,
        extra_body={"enable_thinking": False},  # avoids a server-side error
    )
    # llm: creative sampling; chat: near-deterministic sampling
    llm = ChatOpenAI(temperature=0.5, top_p=0.7, **shared_kwargs)
    chat = ChatOpenAI(temperature=0.01, top_p=0.2, **shared_kwargs)

    # embedding model (text-embedding-v3 / text-embedding-v4) not wired up yet;
    # would use langchain_community DashScopeEmbeddings(model=embedding_model_id)
    embed = None
    return llm, chat, embed
+
+
def test_qwen_chat_model():
    """Smoke-test the Qwen chat model with a single query."""
    llm, _chat, _embed = get_deploy_qwen_models()
    result = llm.invoke(input="你好,你是谁?")
    server_logger.info(f"result={result}")
    print(f"result={result}")
+
+
def test_deepseek_chat_model():
    """Smoke-test the DeepSeek chat model with a single query."""
    llm, _chat, _embed = get_deepseek_models()
    result = llm.invoke(input="你好,你是谁?")
    server_logger.info(f"result={result}")
    print(f"result={result}")
+
+
def serialize_messages(messages: List[Dict]) -> str:
    """Serialize a message list to a JSON string.

    ensure_ascii=False keeps Chinese message content human-readable in
    storage and logs; json.loads round-trips it identically either way.
    """
    return json.dumps(messages, ensure_ascii=False)
+
+
def deserialize_messages(data: str) -> List[Dict]:
    """Parse a JSON string back into a message list; empty/None input yields []."""
    if not data:
        return []
    return json.loads(data)
+
+
def to_langchain_messages(messages: List[Dict]) -> List:
    """Convert message dicts to LangChain message objects; unknown roles are skipped."""
    role_to_cls = {
        "user": HumanMessage,
        "assistant": AIMessage,
        "system": SystemMessage,
    }
    converted = []
    for msg in messages:
        cls = role_to_cls.get(msg["role"])
        if cls is not None:
            converted.append(cls(content=msg["content"]))
    return converted
+
+
def generate_session_id() -> str:
    """Generate a unique session id carrying the "xiwuzc-" prefix."""
    return "xiwuzc-" + str(uuid.uuid4())
+
+
def get_current_timestamp() -> float:
    """Return the current Unix timestamp in seconds (float)."""
    return time.time()
+
+
+def format_timestamp(timestamp: float) -> str:
+    """格式化时间戳为可读字符串"""
+    return datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")
+
+
def build_input_context(
        trace_id: str,
        task_prompt_info: dict,
        input_query: str,
        context: Optional[str] = None,
        supplement_info: Optional[str] = None,
        header_info: Optional[Dict] = None
) -> str:
    """Build the scene-optimized context prompt handed to the model.

    :param trace_id: operation id shown in the session header
    :param task_prompt_info: dict containing the "task_prompt" text
    :param input_query: the user's question
    :param context: retrieved data (optional)
    :param supplement_info: extra info (optional)
    :param header_info: optional dict carrying "token" and "tenantId"
    """
    context = context or "无相关数据"
    supplement_info = supplement_info or "无补充信息"
    token = header_info.get('token', '') if header_info else ''
    tenant_id = header_info.get('tenantId', '') if header_info else ''
    task_prompt = task_prompt_info["task_prompt"]

    # Labels "用户问题"/"牧场ID" fix the garbled "户问题"/"场ID" of the first
    # draft — confirm no downstream parser matched the old label text.
    return f"""
🐄 助手会话 [ID: {trace_id}] 🐖
⏰ 时间: {format_timestamp(get_current_timestamp())}
📌 任务: {task_prompt}

📊 相关数据:
{context}

ℹ️ 补充信息:
{supplement_info}

❓ 用户问题:
{input_query}

🔒 安全验证: {token}
🏠 牧场ID: {tenant_id}
""".strip()
+
+
def clean_json_output(raw_output: str) -> str:
    """Strip a wrapping Markdown code fence from LLM output.

    Generalized: handles both ```json and a bare ``` opening fence (models
    often omit the language tag); unfenced input is merely whitespace-trimmed.
    """
    cleaned = raw_output.strip()
    if cleaned.startswith("```json"):
        cleaned = cleaned[len("```json"):]
    elif cleaned.startswith("```"):
        cleaned = cleaned[3:]
    if cleaned.endswith("```"):
        cleaned = cleaned[:-3]
    return cleaned.strip()
+
+
+if __name__ == "__main__":
+    test_qwen_chat_model()  # 运行
+    # test_deepseek_chat_model()

+ 166 - 0
utils/yaml_utils.py

@@ -0,0 +1,166 @@
+
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :yaml_utils.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/10 17:32
+'''
+
+import os
+import yaml
+from logger.loggering import server_logger
+
+import os
+from dotenv import load_dotenv
+from enums.common_enums import BusinessSceneEnum, ErrorCodeEnum
+from functools import wraps
+
+from logger.loggering import server_logger
+from utils.common import handler_err
+from base.config import config_handler
+
+# 获取当前文件的目录
+current_dir = os.path.dirname(__file__)
+# 构建到 .env 的相对路径
+conf_file_path = os.path.join(current_dir , '../',  '.env')
+#server_logger.info(f"当前目录: {conf_file_path}")
+
+
+
def get_fixed_problem_answer() -> dict:
    """
    Load the fixed-question answer configuration (fixed_problem_answer.yaml).

    :return: parsed YAML config dict
    :raises Exception: re-raised after logging when loading/parsing fails
    """
    # Build the file path; get_yaml_file_path verifies it exists
    yaml_file = get_yaml_file_path("fixed_problem_answer.yaml")

    try:
        with open(yaml_file, 'r', encoding='utf-8') as f:
            prompt_config = yaml.safe_load(f)
        # Single quotes inside the f-string: reusing the double quote there
        # is a SyntaxError before Python 3.12 (PEP 701)
        server_logger.info(f"成功加载[固定问题]回答系统配置.fixed_problem_answer: {prompt_config['fixed_problem_answer']}")
        return prompt_config

    except Exception as e:
        server_logger.error(f"加载[固定问题]回答fixed_problem_answer文件失败: {yaml_file}, 错误: {str(e)}")
        raise
+
+
+
def get_intent_prompt() -> dict:
    """
    Load the intent-recognition system prompt configuration (intent_prompt.yaml).

    :return: parsed YAML config dict with "system_prompt" and "intent_examples"
    :raises Exception: re-raised after logging when loading/parsing fails
    """
    # Build the file path; get_yaml_file_path verifies it exists
    yaml_file = get_yaml_file_path("intent_prompt.yaml")

    try:
        with open(yaml_file, 'r', encoding='utf-8') as f:
            prompt_config = yaml.safe_load(f)
        # Single quotes inside the f-strings: reusing the double quote there
        # is a SyntaxError before Python 3.12 (PEP 701)
        server_logger.info(f"成功加载[意图识别]系统.system_prompt配置: {prompt_config['system_prompt']}")
        server_logger.info(f"成功加载[意图识别]系统配置.examples: {prompt_config['intent_examples']}")
        return prompt_config

    except Exception as e:
        server_logger.error(f"加载意图识别intent_prompt文件失败: {yaml_file}, 错误: {str(e)}")
        raise
+
+
+
def get_fixed_question_intent_prompt() -> dict:
    """
    Load the fixed-question intent-recognition prompt config (fixed_intent_prompt.yaml).

    :return: parsed YAML config dict with "system_prompt" and "fixed_problem_answer"
    :raises Exception: re-raised after logging when loading/parsing fails
    """
    # Build the file path; get_yaml_file_path verifies it exists
    yaml_file = get_yaml_file_path("fixed_intent_prompt.yaml")

    try:
        with open(yaml_file, 'r', encoding='utf-8') as f:
            prompt_config = yaml.safe_load(f)
        # Single quotes inside the f-strings: reusing the double quote there
        # is a SyntaxError before Python 3.12 (PEP 701)
        server_logger.info(f"成功加载[固定问题意图识别]系统.system_prompt配置: {prompt_config['system_prompt']}")
        server_logger.info(f"成功加载[固定问题意图识别]系统配置.examples: {prompt_config['fixed_problem_answer']}")
        return prompt_config

    except Exception as e:
        server_logger.error(f"加载意图识别fixed_intent_prompt文件失败: {yaml_file}, 错误: {str(e)}")
        raise
+
+
def get_system_prompt() -> dict:
    """
    Load the global system prompt configuration (system_prompt.yaml).

    :return: parsed YAML config dict with "system_prompt"
    :raises Exception: re-raised after logging when loading/parsing fails
    """
    # Build the file path; get_yaml_file_path verifies it exists
    yaml_file = get_yaml_file_path("system_prompt.yaml")

    try:
        with open(yaml_file, 'r', encoding='utf-8') as f:
            prompt_config = yaml.safe_load(f)
        # Single quotes inside the f-string: reusing the double quote there
        # is a SyntaxError before Python 3.12 (PEP 701)
        server_logger.info(f"成功加载系统system_prompt配置: {prompt_config['system_prompt']}")
        return prompt_config

    except Exception as e:
        server_logger.error(f"加载system_prompt文件失败: {yaml_file}, 错误: {str(e)}")
        raise
+
+
+
+
+
def get_business_scene_prompt(trace_id, business_scene) -> tuple[BusinessSceneEnum , dict]:
    """
    Load the prompt configuration for a business scene.

    :param trace_id: operation id for log correlation
    :param business_scene: scene code; None selects the common-query default
    :return: (BusinessSceneEnum, prompt config dict)
    :raises ValueError: unknown scene code or missing prompt file
    """
    # Fall back to the common model-query prompt
    scene_enum = BusinessSceneEnum.COMMON_MODEL_QUERY
    prompt_file = scene_enum.prompt_file
    if business_scene is not None:
        # 2025-07-25 change: unknown codes resolve to the generic cattle-farm scene
        scene_enum = BusinessSceneEnum.get_item_by_code_def_val(business_scene , BusinessSceneEnum.CATTLE_FARM_COMMMON)
        if not scene_enum:
            raise ValueError("未找到枚举值")
        if scene_enum.prompt_file is None:
            raise ValueError("业务场景不存在")
        prompt_file = scene_enum.prompt_file

    # Resolve the YAML path (existence is verified there)
    yaml_file = get_yaml_file_path(prompt_file)

    try:
        with open(yaml_file, 'r', encoding='utf-8') as f:
            prompt_config = yaml.safe_load(f)
            server_logger.info(trace_id=trace_id , msg=f"business_scene_enum:{scene_enum.code} Get prompt successfully.")
            return scene_enum , prompt_config
    except Exception as e:
        handler_err(server_logger, e, trace_id=trace_id, err_name="get_business_scene_prompt")
        server_logger.error(trace_id=trace_id , msg=f"获取业务场景任务提示语失败: {e}")
        raise e
+
+
+
+
def get_yaml_file_path(file_name: str) -> str:
    """
    Resolve a prompt YAML file under config/prompt and verify it exists.

    :param file_name: bare file name of the prompt YAML
    :return: full path to the file
    :raises FileNotFoundError: when the file is missing
    """
    path = os.path.join(current_dir , '../', 'config', 'prompt' , file_name)
    if not os.path.exists(path):
        raise FileNotFoundError(f"Prompt文件不存在: {file_name}")
    return path
+
+
+
+#获取系统提示语
+system_prompt_config = get_system_prompt()

+ 39 - 0
views/__init__.py

@@ -0,0 +1,39 @@
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :__init__.py.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/10 17:04
+'''
+
+import uuid
+from contextlib import asynccontextmanager
+from contextvars import ContextVar
+
+from fastapi import FastAPI, APIRouter
+
+from function.load_mcp_server import LoadMcpServer
+
+#mcp_server = LoadMcpServer()
+mcp_server = None
+
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: tool loading at startup (currently disabled)
    and MCP-server cleanup at shutdown."""
    # Load MCP tools at startup — disabled while mcp_server is None
    #await mcp_server.get_mcp_tools()

    yield
    # Clean up at shutdown.
    # NOTE(review): `mcp_server.cleanup` is truth-tested, not called — if it is
    # a method it is always truthy; confirm it is intended as a flag.
    if mcp_server and mcp_server.cleanup:
        await mcp_server.close()
+
+cattle_router = APIRouter(prefix="/queryex", tags=["agent"])
+current_operation_id: ContextVar[str] = ContextVar("operation_id", default=str(uuid.uuid4()))
+
+
+
def get_operation_id() -> str:
    """Dependency: return the current operation id from the context var.

    NOTE(review): the ContextVar default is a single uuid computed once at
    import time, so any request that never sets the var shares that one id —
    confirm middleware always sets it per request.
    """
    return current_operation_id.get()

+ 338 - 0
views/cattle_farm_views.py

@@ -0,0 +1,338 @@
+# !/usr/bin/ python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :cattle_farm_views.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/7/10 17:32
+'''
+import json
+from typing import Optional
+
+from fastapi import Depends, Response, Header
+from sse_starlette import EventSourceResponse
+from starlette.responses import JSONResponse
+
+from agent.agent_mcp import client
+from generate.model_generate import XiwuzcModelGenerateClient
+from logger.loggering import server_logger
+from schemas.cattle_farm import CattleFarm
+from utils import yaml_utils
+from utils.common import return_json, handler_err
+from views import cattle_router, get_operation_id
+from agent.intent import intent_identify_client
+
+
+
def get_token(authorization: Optional[str] = Header(default=None)):
    """Extract the Bearer token from the Authorization header (optional)."""
    if authorization is None:
        return None
    # Everything after the first space is the token ("" when no space present)
    return authorization.partition(" ")[2]
+
+
def get_tenant_id(tenant_id: Optional[str] | None = Header(None, alias="X-lq-TENANT-ID")):
    """Read the tenant id from the X-lq-TENANT-ID header (may be None)."""
    return tenant_id
+
+
+# 路由
+
@cattle_router.post("/chat", response_model=CattleFarm)
async def chat_endpoint(
        param: CattleFarm,
        token: str = Depends(get_token),
        tenant_id: str = Depends(get_tenant_id),
        trace_id: str = Depends(get_operation_id)):
    """
    Return the agent's answer for a business scene (non-streaming).

    Falls back to LLM intent recognition when no scene is supplied, resolves
    the scene prompt, runs the query through the MCP client and returns a
    JSON payload; all errors are mapped to code 100500.
    """

    try:
        server_logger.info(trace_id=trace_id, msg=f"{param}")
        # Parameter validation

        # Pull the fields off the request model
        input_data = param.input
        session_id = param.config.sessionId
        business_scene = param.businessScene
        context = param.context
        supplementInfo = param.supplementInfo
        header_info = {
            "token": token,
            "tenantId": tenant_id,
        }
        # When business_scene is None, fall back to LLM intent recognition
        if business_scene is None:
            business_scene = await intent_identify_client.recognize_intent(trace_id=trace_id , config=param.config , input=input_data)
            server_logger.info(trace_id=trace_id, msg=f"使用意图识别:business_scene={business_scene}")

        business_scene_enum, task_prompt_info = yaml_utils.get_business_scene_prompt(trace_id=trace_id, business_scene=business_scene)
        final_result_data_type = task_prompt_info["final_result_data_type"]
        server_logger.info(trace_id=trace_id, msg=f"session_id:{session_id}, business_scene:{business_scene},final_result_data_type:{final_result_data_type} ,input_data: {input_data}",
                           log_type="queryex")
        # Non-streaming execution through the MCP client
        output = await client.handle_query(trace_id , business_scene , task_prompt_info, input_data, context, supplementInfo, header_info , param.config)
        # Direct execution result
        server_logger.debug(trace_id=trace_id, msg=f"【result】: {output}", log_type="queryex")
        # Return a dict-shaped response
        return JSONResponse(
            return_json(business_scene=business_scene, data={"output": output}, data_type=final_result_data_type, trace_id=trace_id))
    except ValueError as err:
        handler_err(server_logger, trace_id=trace_id, err=err, err_name="queryex")
        return JSONResponse(return_json(code=100500, msg=f"{err}", trace_id=trace_id))

    except Exception as err:
        handler_err(server_logger, trace_id=trace_id, err=err, err_name="queryex")
        return JSONResponse(return_json(code=100500, msg=f"{err}", trace_id=trace_id))
+
+
@cattle_router.post("/stream", response_class=Response)
async def chat_agent(param: CattleFarm,
                     token: str = Depends(get_token),
                     tenant_id: str = Depends(get_tenant_id),
                     trace_id: str = Depends(get_operation_id)):
    """
    Agent feedback by scene as an SSE streaming response.

    NOTE(review): the request's business_scene is logged and echoed in events,
    but the prompt is hard-coded to COMMON_MODEL_QUERY with an empty
    task_prompt and dataType "text" — confirm intent recognition / prompt
    lookup is intentionally disabled on this route.
    """
    try:
        server_logger.info(trace_id=trace_id, msg=f"{param}")

        # Extract request parameters (user_role is currently unused here)
        input_data = param.input
        session_id = param.config.sessionId
        user_role = param.config.userRole
        business_scene = param.businessScene
        context = param.context
        supplementInfo = param.supplementInfo
        header_info = {
            "token": token,
            "tenantId": tenant_id,
        }
        # (no LLM intent recognition for a missing business_scene on this route)

        # Task prompt info: fixed to the common model query with an empty prompt
        from enums.common_enums import BusinessSceneEnum
        business_scene_enum, task_prompt_info = BusinessSceneEnum.COMMON_MODEL_QUERY , {"task_prompt": ""}
        final_result_data_type = "text"
        server_logger.info(trace_id=trace_id, msg=f"session_id:{session_id}, business_scene:{business_scene},final_result_data_type:{final_result_data_type} ,input_data: {input_data}",
                           log_type="queryex")

        server_logger.info(trace_id=trace_id, msg=f"{param}")
        # Build the SSE event stream
        async def event_generator():
            try:
                # Stream chunks from the MCP client
                async for chunk in client.handle_query_stream(
                        trace_id=trace_id,
                        config_param=param.config,
                        business_scene=business_scene,
                        task_prompt_info=task_prompt_info,
                        input_query=input_data,
                        context=context,
                        supplement_info=supplementInfo,
                        header_info=header_info
                ):
                    server_logger.debug(trace_id=trace_id, msg=f"{chunk}")
                    # Emit one data chunk
                    yield {
                        "event": "message",
                        "data": json.dumps({
                            "code": 0,
                            "output": chunk,
                            "completed": False,
                            "trace_id": trace_id,
                            "dataType": final_result_data_type,
                            "business_scene": business_scene,
                        }, ensure_ascii=False)
                    }
                # Fetch the cached final result
                result_data = await client.get_redis_result_cache_data(trace_id=trace_id)
                # Emit the terminal event
                yield {
                    "event": "message_end",
                    "data": json.dumps({
                        "completed": True,
                        "message": json.dumps(result_data, ensure_ascii=False),
                        "code": 0,
                        "trace_id": trace_id,
                        "dataType": "text",
                        "business_scene": business_scene,
                    }, ensure_ascii=False),
                }
            except Exception as e:
                # Error event
                yield {
                    "event": "error",
                    "data": json.dumps({
                        "trace_id": trace_id,
                        "message": str(e),
                        "code": 1,
                        "dataType": "text",
                        "business_scene": business_scene,
                    }, ensure_ascii=False)
                }
            finally:
                # No client shutdown needed: it is a singleton
                pass

        # Return the SSE response
        return EventSourceResponse(
            event_generator(),
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive"
            }
        )

    except Exception as err:
        # Setup-phase error handling
        handler_err(server_logger, trace_id=trace_id, err=err, err_name="queryex")
        return JSONResponse(
            return_json(code=1, msg=f"{err}", trace_id=trace_id),
            status_code=500
        )
+
+
@cattle_router.post("/generate/ai_stream", response_class=Response)
async def chat_stream(param: CattleFarm,
        token: str = Depends(get_token),
        tenant_id: str = Depends(get_tenant_id),
        trace_id: str = Depends(get_operation_id)):
    """
    SSE streaming generation endpoint.

    Fix: `recognize_intent` is a coroutine (it is awaited in /chat with
    trace_id/config/input) but was called here without `await` and with only
    the input, binding a coroutine object to `business_scene`. The endpoint
    is now async so the call can be awaited with the /chat signature.
    """
    try:
        server_logger.info(trace_id=trace_id, msg=f"{param}")

        # Extract request parameters
        input_data = param.input
        session_id = param.config.sessionId
        business_scene = param.businessScene
        context = param.context
        supplementInfo = param.supplementInfo
        header_info = {
            "token": token,
            "tenantId": tenant_id,
        }
        # When business_scene is None, fall back to LLM intent recognition
        if business_scene is None:
            business_scene = await intent_identify_client.recognize_intent(trace_id=trace_id, config=param.config, input=input_data)
            server_logger.info(trace_id=trace_id, msg=f"使用意图识别:business_scene={business_scene}")

        # Resolve the scene prompt
        business_scene_enum, task_prompt_info = yaml_utils.get_business_scene_prompt(trace_id=trace_id, business_scene=business_scene)
        server_logger.info(trace_id=trace_id, msg=f"session_id:{session_id}, business_scene:{business_scene} , business_scene_enum:{business_scene_enum} ,input_data: {input_data}",
                           log_type="queryex")
        xwzc_generate_client = XiwuzcModelGenerateClient()

        # Build the SSE event stream
        async def event_generator():
            try:
                # Stream chunks from the (synchronous) model generator
                for chunk in xwzc_generate_client.get_model_generate_stream(task_prompt_info, session_id, input_data,
                                                                            context, supplementInfo):
                    # Emit one data chunk
                    yield {
                        "event": "message",
                        "data": json.dumps({
                            "output": chunk,
                            "completed": False,
                            "trace_id": trace_id,
                            "dataType": business_scene_enum.data_type
                        })
                    }

                # Emit the terminal event
                yield {
                    "event": "message_end",
                    "data": json.dumps({
                        "completed": True,
                        "message": "Stream completed",
                        "code": 0,
                        "trace_id": trace_id
                    }),

                }
            except Exception as e:
                # Error event
                yield {
                    "event": "error",
                    "data": json.dumps({
                        "trace_id": trace_id,
                        "msg": str(e),
                        "code": 1,
                        "dataType": "text"
                    })
                }

        # Return the SSE response
        return EventSourceResponse(
            event_generator(),
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive"
            }
        )

    except Exception as err:
        # Setup-phase error handling
        handler_err(server_logger, trace_id=trace_id, err=err, err_name="queryex")
        return JSONResponse(
            return_json(code=1, msg=f"{err}", trace_id=trace_id),
            status_code=500
        )
+
+
+
+
@cattle_router.post("/generate/tools/execute", response_model=CattleFarm)
async def chat_generate_tools_endpoint(
        param: CattleFarm,
        token: str = Depends(get_token),
        tenant_id: str = Depends(get_tenant_id),
        trace_id: str = Depends(get_operation_id)):
    """
    Tool-call execution endpoint.

    Fix: `recognize_intent` is a coroutine (awaited in /chat with
    trace_id/config/input) but was called here without `await` and with only
    the input, binding a coroutine object to `business_scene`; it is now
    awaited with the /chat signature.
    """

    try:
        server_logger.info(trace_id=trace_id, msg=f"{param}")
        # Pull the fields off the request model
        input_data = param.input
        session_id = param.config.sessionId
        business_scene = param.businessScene
        context = param.context
        supplementInfo = param.supplementInfo
        header_info = {
            "token": token,
            "tenantId": tenant_id,
        }
        # When business_scene is None, fall back to LLM intent recognition
        if business_scene is None:
            business_scene = await intent_identify_client.recognize_intent(trace_id=trace_id, config=param.config, input=input_data)
            server_logger.info(trace_id=trace_id, msg=f"使用意图识别:business_scene={business_scene}")

        business_scene_enum, task_prompt_info = yaml_utils.get_business_scene_prompt(trace_id=trace_id, business_scene=business_scene)
        server_logger.info(trace_id=trace_id, msg=f"session_id:{session_id}, business_scene:{business_scene} , business_scene_enum:{business_scene_enum} ,input_data: {input_data}",
                           log_type="queryex/tools2")
        xwzc_generate_client = XiwuzcModelGenerateClient()
        # Execute the tool call
        output = await xwzc_generate_client.get_model_tools_call(trace_id, session_id, task_prompt_info, input_data, context, supplementInfo, header_info)
        server_logger.debug(trace_id=trace_id, msg=f"【result】: {output}", log_type="queryex/tools2")
        # Return a dict-shaped response
        return JSONResponse(
            return_json(data={"output": output}, data_type=business_scene_enum.data_type, trace_id=trace_id))
    except ValueError as err:
        handler_err(server_logger, trace_id=trace_id, err=err, err_name="queryex/tools2")
        return JSONResponse(return_json(code=100500, msg=f"{err}", trace_id=trace_id))

    except Exception as err:
        handler_err(server_logger, trace_id=trace_id, err=err, err_name="queryex/tools2")
        return JSONResponse(return_json(code=100500, msg=f"{err}", trace_id=trace_id))

+ 71 - 0
views/fixed_answer.py

@@ -0,0 +1,71 @@
+
+
+
+import asyncio
+from sse_starlette import EventSourceResponse
+from utils.yaml_utils import fixed_question_intent_config
+from logger.loggering import server_logger
+from agent.fixed_intent import fixed_intent_identify_client
+from utils.tool_utils import get_fixed_problem_answer_txt_content ,verify_user_role
+
+
+
+
def get_fixed_problem_answer_txt(trace_id, input , user_role):
    """
    Return a canned answer when the input matches a configured fixed question.

    :return: (True, answer text) on a fixed-question hit;
             (False, original input) otherwise
    """
    # Only tenant users may query fixed questions; plain users pass through
    if not verify_user_role(user_role):
        return False, input

    result = fixed_intent_identify_client.recognize_intent(input)
    server_logger.info(trace_id=trace_id, msg=f"固定问题意图识别系统: input={input}, result={result}")
    # The recognized label must appear in the configured fixed-question list
    if result not in get_fixed_problem_answer_recognize_out_list(trace_id=trace_id):
        return False, input

    return True, get_fixed_problem_answer_txt_content(result)
+
+
+
def get_fixed_problem_answer_recognize_out_list(trace_id: str):
    """
    Return the configured list of fixed-question intent labels, e.g.
    ["question_1", "question_2", ..., "question_9"].
    """
    entries = fixed_question_intent_config["fixed_problem_answer"]
    recognize_intent_out_list = [entry["out"] for entry in entries]
    server_logger.info(trace_id=trace_id, msg=f"配置固定问题意图识别结果列表: recognize_intent_out_list={recognize_intent_out_list}")
    return recognize_intent_out_list
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+