| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@Project : lq-agent-api
@File    : model_generate.py
@IDE     : PyCharm
@Author  :
@Date    : 2025/7/14 14:22
'''
- from typing import Dict, Optional
- from langchain_core.prompts import HumanMessagePromptTemplate
- from langchain_core.prompts import ChatPromptTemplate
- from foundation.utils.utils import get_models
- from foundation.utils.yaml_utils import system_prompt_config
class TestGenerateModelClient:
    """
    Client for generative (text-generation) models.

    Loads the deployed LLM / chat / embedding models once at construction
    time and exposes blocking and streaming generation helpers, plus a
    utility that assembles the question-and-context prompt body.
    """

    def __init__(self):
        # Fetch the deployed model handles (llm, chat, embedding).
        llm, chat, embed = get_models()
        self.llm = llm
        self.chat = chat
        # Fixed system prompt loaded from the YAML configuration.
        self.system_prompt = system_prompt_config["system_prompt"]

    def get_prompt_template(self):
        """
        Build a plain prompt template consisting of a single human message
        with slots for the system message, the user question, and the answer.

        Returns:
            ChatPromptTemplate: template expecting ``system_message`` and
            ``question`` input variables.
        """
        human_template = """
{system_message}
用户的问题为:
{question}
答案为:
"""
        human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
        chat_prompt_template = ChatPromptTemplate.from_messages([human_message_prompt])
        return chat_prompt_template

    def _build_messages(self, input_query):
        """
        Format the fixed system prompt plus the user query into chat messages.

        Shared by the blocking and streaming generation paths (previously
        duplicated in both methods).
        """
        prompt_template = ChatPromptTemplate.from_messages([
            # NOTE(review): task_prompt_info["task_prompt"] was considered here
            # (see callers) but the fixed config system prompt is used instead.
            ("system", self.system_prompt),
            ("human", "{input}"),
        ])
        return prompt_template.invoke({"input": input_query})

    def get_model_generate_invoke(self, trace_id, task_prompt_info: dict, input_query, context=None):
        """
        Blocking generation: send the formatted prompt to the LLM and return
        the complete response text.

        Args:
            trace_id: request trace id (currently unused in this method).
            task_prompt_info: task prompt config (currently unused; the fixed
                system prompt is applied instead).
            input_query: the user's question.
            context: optional extra context (currently unused).

        Returns:
            str: the model's full answer.
        """
        messages = self._build_messages(input_query)
        # Blocking call (the original comment said "streaming", which was wrong).
        response = self.llm.invoke(messages)
        return response.content

    def get_model_generate_stream(self, trace_id, task_prompt_info: dict, input_query, context=None):
        """
        Streaming generation: yield the answer chunk-by-chunk for a
        typewriter effect.

        Args:
            trace_id: request trace id (currently unused in this method).
            task_prompt_info: task prompt config (currently unused; the fixed
                system prompt is applied instead).
            input_query: the user's question.
            context: optional extra context (currently unused).

        Yields:
            str: successive chunks of the model's answer.
        """
        messages = self._build_messages(input_query)
        # Stream the response and emit each chunk's text as it arrives.
        for chunk in self.llm.stream(messages):
            yield chunk.content

    def get_input_context(
        self,
        trace_id: str,
        task_prompt_info: dict,
        input_query: str,
        context: Optional[str] = None
    ) -> str:
        """
        Compose the question-plus-context prompt body.

        Args:
            trace_id: log trace id embedded in the prompt.
            task_prompt_info: must contain the key ``"task_prompt"``.
            input_query: the user's question.
            context: optional context data; rendered as "无" (none) when
                omitted or falsy.

        Returns:
            str: the filled-in context prompt.

        Raises:
            KeyError: if ``task_prompt_info`` lacks ``"task_prompt"``.
        """
        context = context or "无"
        task_prompt_info_str = task_prompt_info["task_prompt"]
        # Scenario-tuned context prompt. Bug fix: the last label was the
        # typo "户问题" — corrected to "用户问题" (user question), matching
        # the wording "用户的问题" used in get_prompt_template.
        base_context_prompt = """
日志链路跟踪ID:{trace_id}
任务信息:{task_prompt_info_str}
相关上下文数据:{context}
用户问题:{input}
"""
        return base_context_prompt.format(
            trace_id=trace_id,
            task_prompt_info_str=task_prompt_info_str,
            context=context,
            input=input_query
        )
# Module-level singleton: constructing it here runs __init__ at import time,
# which calls get_models() to load the deployed model handles once.
test_generate_model_client = TestGenerateModelClient()
|