Просмотр исходного кода

增加部署说明和案例测试修改

lingmin_package@163.com 3 месяцев назад
Родитель
Commit
82fcabb22e

+ 1 - 1
Dockerfile

@@ -1,4 +1,4 @@
-FROM python:3.13-slim
+FROM python:3.12-slim
 
 ENV DEBIAN_FRONTEND=noninteractive \
     TZ=Asia/Shanghai

+ 38 - 0
README_deploy.md

@@ -0,0 +1,38 @@
+
+
+
+#### docker 容器部署
+##### 目前采用离线打包docker容器上传部署模式
+  - 1、本地容器打包        docker build -t lq_agent_platform_server:v0.1 .
+    - 1.1、保存本地镜像文件    docker save -o lq_agent_platform_v0.1.img lq_agent_platform_server:v0.1
+    - 1.2、容器压缩           tar -czvf lq_agent_platform_v0.1.tar.gz lq_agent_platform_v0.1.img
+    - 1.3、sftp上传到测试环境目录:       /home/cjb/lq_workspace/app/LqAgentServer/docker_tmp
+    - 1.4、容器解压           tar -xzvf lq_agent_platform_v0.1.tar.gz
+  - 2、删除测试环境原镜像   docker rmi lq_agent_platform_server:v0.1
+  - 3、容器加载           docker load -i lq_agent_platform_v0.1.img
+  - 4、容器启动
+    
+    docker run --name=LQAgentServer -d  --memory="4096m" --memory-swap="5000m" --cpus="3" --cpuset-cpus="0-2" --restart=always -p 8001:8001 -v /home/cjb/lq_workspace/app/LqAgentServer/config:/app/config/ -v /home/cjb/lq_workspace/app/LqAgentServer/gunicorn_log/:/app/gunicorn_log/  -v /home/cjb/lq_workspace/app/LqAgentServer/logs/:/app/logs/ --network=host lq_agent_platform_server:v0.1
+
+
+
+  - 5、容器copy文件
+     - 进入容器查看文件:docker exec -it LQAgentServer /bin/sh 
+     - copy外部文件到容器内:docker cp gunicorn_config.py LQAgentServer:/app/gunicorn_config.py
+  
+    - 实例启动: docker start LQAgentServer
+    - 实例停止: docker stop LQAgentServer
+    - 实例重启: docker restart LQAgentServer
+    - 使用docker查看日志  docker logs -f LQAgentServer
+    - 使用docker查看日志  docker logs -f --tail {行数} LQAgentServer
+
+  - 6、路径文件映射
+    - 配置文件
+      - 宿主机:/home/cjb/lq_workspace/app/LqAgentServer/config/ ==> 容器:/app/config/ 
+      - 宿主机:/home/cjb/lq_workspace/app/LqAgentServer/logs/ ==> 容器:/app/logs/
+    - 配置文件路径说明,注意修改后重启容器
+      - 宿主机配置文件路径
+        vim /home/cjb/lq_workspace/app/LqAgentServer/config/prompt/{prompt文件名称}.yaml
+        vim /home/cjb/lq_workspace/app/LqAgentServer/config/config.ini
+
+      - 宿主机日志地址 /home/cjb/lq_workspace/app/LqAgentServer/logs/

BIN
build_graph_app.png


+ 9 - 9
config/config.ini

@@ -78,13 +78,13 @@ QWEN_LOCAL_14B_API_KEY=sk-dummy
 
 
 [mysql]
-MYSQL_HOST=localhost
-MYSQL_PORT=3306
+MYSQL_HOST=192.168.0.3
+MYSQL_PORT=13306
 MYSQL_USER=root
-MYSQL_PASSWORD=admin
+MYSQL_PASSWORD=lq@123
 MYSQL_DB=lq_db
 MYSQL_MIN_SIZE=1
-MYSQL_MAX_SIZE=2
+MYSQL_MAX_SIZE=5
 MYSQL_AUTO_COMMIT=True
 
 
@@ -99,8 +99,8 @@ PGVECTOR_PASSWORD=pg16@123
 
 
 [milvus]
-MILVUS_HOST=124.223.140.149
-MILVUS_PORT=7432
-MILVUS_DB=vector_db
-MILVUS_USER=vector_user
-MILVUS_PASSWORD=pg16@123
+MILVUS_HOST=192.168.0.3
+MILVUS_PORT=19530
+MILVUS_DB=lq_db
+MILVUS_USER=
+MILVUS_PASSWORD=

+ 2 - 2
foundation/agent/generate/model_generate.py

@@ -39,12 +39,12 @@ class GenerateModelClient:
         # logger.info(f"[模型生成结果]: {response.content}")
         return response.content
 
-    async def get_model_generate_stream(self, trace_id, task_prompt_info: dict):
+    def get_model_generate_stream(self, trace_id, task_prompt_info: dict):
         """
             模型流式生成(异步)
         """
         prompt_template = task_prompt_info["task_prompt"]
-        # 直接格式化消息,不需要额外的invoke步骤
+        # 直接格式化消息,不需要额外的invoke步骤  stream
         messages = prompt_template.format_messages()
         response = self.llm.stream(messages)
         for chunk in response:

+ 12 - 3
foundation/agent/workflow/test_workflow_node.py

@@ -22,7 +22,7 @@ from foundation.agent.generate.test_intent import intent_identify_client
 from foundation.agent.test_agent import test_agent_client
 from foundation.schemas.test_schemas import FormConfig
 from foundation.agent.generate.model_generate import generate_model_client
-
+from foundation.utils.yaml_utils import system_prompt_config
 
 
 
@@ -87,7 +87,7 @@ class TestWorkflowNode:
         }
     
 
-    def chat_box_generate(self , state: TestCusState) -> dict:
+    async def chat_box_generate(self , state: TestCusState) -> dict:
         """
             模型生成节点(纯生成类问题)
             :param state:
@@ -98,7 +98,16 @@ class TestWorkflowNode:
         user_input = state["user_input"]
         task_prompt_info = state["task_prompt_info"]
         task_prompt_info["task_prompt"] = ""
-        response_content = generate_model_client.get_model_generate_invoke(trace_id=trace_id , task_prompt_info=task_prompt_info, input_query=user_input)
+
+      # 创建ChatPromptTemplate
+        template = ChatPromptTemplate.from_messages([
+            ("system", system_prompt_config['system_prompt']),
+            ("user", user_input)
+        ])
+
+        task_prompt_info = {"task_prompt": template}
+
+        response_content = await generate_model_client.get_model_generate_invoke(trace_id=trace_id , task_prompt_info=task_prompt_info)
         messages = [AIMessage(content=response_content , name="chat_box_generate")]
         server_logger.info(trace_id=trace_id, msg=f"【result】: {response_content}", log_type="chat_box_generate")
         return {

+ 6 - 2
requirements.txt

@@ -121,7 +121,6 @@ pydantic-settings==2.10.1
 pydantic_core==2.33.2
 Pygments==2.19.2
 PyJWT==2.8.0
-pymilvus==2.5.12
 PyMuPDF==1.26.3
 PyMySQL==1.1.1
 pyperclip==1.9.0
@@ -182,7 +181,12 @@ langgraph-checkpoint-postgres==2.0.23
 langgraph-checkpoint-redis==0.0.8
 langchain-redis==0.2.3
 aiomysql==0.3.2
-celery=5.5.3
+celery==5.5.3
 pypdf==6.2.0
 grandalf==0.8
+psycopg2-binary==2.9.11
+pgvector==0.4.1
+pymilvus==2.5.11
+sentence-transformers==4.1.0
+
 

+ 1 - 1
run.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
 # 服务管理脚本
-APP_NAME="xiwu_agent_server"         # 自定义服务名称
+APP_NAME="lq_agent_platform_server"         # 自定义服务名称
 PID_FILE="./gunicorn_log/gunicorn.pid"          # PID 文件路径
 LOG_FILE="./gunicorn_log/gunicorn.log"          # 日志文件路径
 START_COMMAND="gunicorn -c gunicorn_config.py server.app:app"

+ 22 - 9
views/test_views.py

@@ -28,6 +28,8 @@ from database.repositories.bus_data_query import BasisOfPreparationDAO
 from foundation.utils.tool_utils import DateTimeEncoder
 from foundation.models.silicon_flow import SiliconFlowAPI
 from foundation.rag.vector.pg_vector import PGVectorDB
+from langchain_core.prompts import ChatPromptTemplate
+from foundation.utils.yaml_utils import system_prompt_config
 
 
 @test_router.post("/generate/chat", response_model=TestForm)
@@ -46,21 +48,27 @@ async def generate_chat_endpoint(
         context = param.context
         header_info = {
         }
-        task_prompt_info = {"task_prompt": ""}
-        output = generate_model_client.get_model_generate_invoke(trace_id , task_prompt_info, 
-                                                                                 input_query, context)
+        
+            # 创建ChatPromptTemplate
+        template = ChatPromptTemplate.from_messages([
+            ("system", system_prompt_config['system_prompt']),
+            ("user", input_query)
+        ])
+
+        task_prompt_info = {"task_prompt": template}
+        output = await generate_model_client.get_model_generate_invoke(trace_id=trace_id , task_prompt_info=task_prompt_info)
         # 直接执行
-        server_logger.debug(trace_id=trace_id, msg=f"【result】: {output}", log_type="agent/chat")
+        server_logger.info(trace_id=trace_id, msg=f"【result】: {output}", log_type="agent/chat")
         # 返回字典格式的响应
         return JSONResponse(
             return_json(data={"output": output}, data_type="text", trace_id=trace_id))
 
     except ValueError as err:
-        handler_err(server_logger, trace_id=trace_id, err=err, err_name="generate/stream")
+        handler_err(server_logger, trace_id=trace_id, err=err, err_name="agent/chat")
         return JSONResponse(return_json(code=100500, msg=f"{err}", trace_id=trace_id))
 
     except Exception as err:
-        handler_err(server_logger, trace_id=trace_id, err=err, err_name="generate/stream")
+        handler_err(server_logger, trace_id=trace_id, err=err, err_name="agent/chat")
         return JSONResponse(return_json(code=100500, msg=f"{err}", trace_id=trace_id))
 
 
@@ -80,13 +88,18 @@ async def generate_stream_endpoint(
         context = param.context
         header_info = {
         }
-        task_prompt_info = {"task_prompt": ""}
+              # 创建ChatPromptTemplate
+        template = ChatPromptTemplate.from_messages([
+            ("system", system_prompt_config['system_prompt']),
+            ("user", input_query)
+        ])
+
+        task_prompt_info = {"task_prompt": template}
         # 创建 SSE 流式响应
         async def event_generator():
             try:
                 # 流式处理查询 trace_id, task_prompt_info: dict, input_query, context=None
-                for chunk in generate_model_client.get_model_generate_stream(trace_id , task_prompt_info, 
-                                                                                 input_query, context):
+                for chunk in generate_model_client.get_model_generate_stream(trace_id=trace_id , task_prompt_info=task_prompt_info):
                     # 发送数据块
                     yield {
                         "event": "message",