Przeglądaj źródła

v0.0.4-debug
- 修复semantic_logic_check 字段未对应情况
- 优化审查RAG链路性能

WangXuMing 3 miesięcy temu
rodzic
commit
384021b9d9

+ 9 - 0
core/base/workflow_manager.py

@@ -361,7 +361,16 @@ class WorkflowManager:
 
             return False
 
+        except RuntimeError as e:
+            # 事件循环关闭是正常情况(任务结束),不记录错误
+            if "Event loop is closed" in str(e):
+                logger.debug(f"检查终止信号时事件循环已关闭: {callback_task_id}")
+                return False
+            else:
+                logger.error(f"检查终止信号失败: {str(e)}", exc_info=True)
+                return False
         except Exception as e:
+            # 其他异常仍然记录错误
             logger.error(f"检查终止信号失败: {str(e)}", exc_info=True)
             return False
 

+ 29 - 19
core/construction_review/component/ai_review_engine.py

@@ -58,7 +58,7 @@ from typing import Any, Dict, List, Optional, Sequence
 from core.base.task_models import TaskFileInfo
 from core.construction_review.component.infrastructure.milvus import MilvusConfig, MilvusManager
 from core.construction_review.component.infrastructure.parent_tool import (
-    enhance_with_parent_docs,
+    enhance_with_parent_docs_grouped,
     extract_query_pairs_results
 )
 from core.construction_review.component.infrastructure.relevance import is_relevant_async
@@ -259,7 +259,7 @@ class AIReviewEngine(BaseReviewer):
         if not basic_tasks:
             return {
                 "grammar_check": self._process_review_result(None),
-                "semantic_check": self._process_review_result(None),
+                "semantic_logic_check": self._process_review_result(None),
                 "sensitive_check": self._process_review_result(None),
             }
 
@@ -317,7 +317,7 @@ class AIReviewEngine(BaseReviewer):
             json.dump(completeness_result,f,ensure_ascii=False,indent=4)
         return {
             'grammar_check': grammar_result,
-            'semantic_check': semantic_result,
+            'semantic_logic_check': semantic_result,
             'sensitive_check': sensitive_result,
             'completeness_check': completeness_result,
         }
@@ -583,23 +583,25 @@ class AIReviewEngine(BaseReviewer):
         Returns:
             Dict[str, Any]: RAG增强审查结果
         """
-        # Step 1: 向量检索
+        # Step 1: 获取待审查单元内容
         query_content = unit_content['content']
         logger.info(f"[RAG增强] 开始处理, 内容长度: {len(query_content)}")
 
-        # Step 2: 查询提取 + 实体增强检索
+        # Step 2: 构建查询对
         query_pairs = query_rewrite_manager.query_extract(query_content)
         logger.info(f"[RAG增强] 提取到 {len(query_pairs)} 个查询对")
 
+        # Step 3: 根据查询对主实体、辅助实体,进行实体增强召回
         bfp_result_lists = entity_enhance.entities_enhance_retrieval(query_pairs)
 
-        # 🔍 保存关键节点结果(用于对比分析)
-        os.makedirs("temp/ai_review_engine", exist_ok=True)
-        with open("temp/ai_review_engine/bfp_result_lists.json", "w", encoding='utf-8') as f:
-            json.dump(bfp_result_lists, f, ensure_ascii=False, indent=4)
-        logger.info("[RAG增强] ✅ 已保存 bfp_result_lists 到 temp/ai_review_engine/bfp_result_lists.json")
 
-        # Step 3: 检查检索结果
+        # # 🔍 保存关键节点结果(用于对比分析)
+        # os.makedirs("temp/ai_review_engine", exist_ok=True)
+        # with open("temp/ai_review_engine/bfp_result_lists.json", "w", encoding='utf-8') as f:
+        #     json.dump(bfp_result_lists, f, ensure_ascii=False, indent=4)
+        # logger.info("[RAG增强] ✅ 已保存 bfp_result_lists 到 temp/ai_review_engine/bfp_result_lists.json")
+
+        # Step 4: 检查检索结果
         if not bfp_result_lists:
             logger.warning("[RAG增强] 实体检索未返回结果")
             return {
@@ -610,19 +612,27 @@ class AIReviewEngine(BaseReviewer):
                 'metadata': {}
             }
 
-        # Step 4: 父文档增强 (使用独立工具函数 - 显式返回)
+        # Step 5: 父文档增强(使用分组增强策略)
         try:
-            enhancement_result = enhance_with_parent_docs(self.milvus, bfp_result_lists)
+            enhancement_result = enhance_with_parent_docs_grouped(
+                self.milvus,
+                bfp_result_lists,
+                score_threshold=0.5,  # bfp_rerank_score 阈值
+                max_parents_per_pair=3  # 每个查询对最多3个父文档
+            )
             enhanced_results = enhancement_result['enhanced_results']
             enhanced_count = enhancement_result['enhanced_count']
+            enhanced_pairs = enhancement_result.get('enhanced_pairs', 0)
+            total_pairs = enhancement_result.get('total_pairs', 0)
 
-            # 🔍 保存关键节点结果(用于对比分析)
-            with open("temp/ai_review_engine/enhance_with_parent_docs.json", "w", encoding='utf-8') as f:
-                json.dump(enhanced_results, f, ensure_ascii=False, indent=4)
-            logger.info(f"[RAG增强] ✅ 已保存 enhance_with_parent_docs 到 temp/ai_review_engine/enhance_with_parent_docs.json (共{enhanced_count}个)")
+            # 确保目录存在
+            os.makedirs("temp/ai_review_engine", exist_ok=True)
 
-            logger.info(f"[RAG增强] 成功增强 {enhanced_count} 个结果")
-            logger.info(f"[RAG增强] 使用了 {len(enhancement_result['parent_docs'])} 个父文档")
+            with open("temp/ai_review_engine/enhance_with_parent_docs_grouped.json", "w", encoding='utf-8') as f:
+                json.dump(enhanced_results, f, ensure_ascii=False, indent=4)
+            logger.info(f"[RAG增强] ✅ 已保存分组增强结果到 temp/ai_review_engine/enhance_with_parent_docs_grouped.json")
+            logger.info(f"[RAG增强] 分组增强完成: {enhanced_pairs}/{total_pairs} 个查询对进行了增强")
+            logger.info(f"[RAG增强] 成功增强 {enhanced_count} 个结果,使用了 {len(enhancement_result['parent_docs'])} 个父文档")
         except Exception as e:
             logger.error(f"[RAG增强] 父文档增强失败: {e}", exc_info=True)
             # 失败时使用原始结果

+ 83 - 107
core/construction_review/component/infrastructure/parent_tool.py

@@ -63,141 +63,116 @@ def fetch_parent_document(
         return None
 
 
-def enhance_with_parent_docs(
+
+def enhance_with_parent_docs_grouped(
     milvus_manager,
     bfp_result_lists: List,
-    top_k: int = 3,
-    max_parent_text_length: Optional[int] = None
+    score_threshold: float = 0.5,
+    max_parents_per_pair: int = 2,
+    # max_parent_text_length: Optional[int] = None
 ) -> Dict[str, Any]:
     """
-    使用父文档增强检索结果 (显式返回版本)
+    分组增强 + 按分数筛选 (每个查询对独立处理)
 
-    流程:
-    1. 统计所有 parent_id 的出现频率
-    2. 按频率排序,取 top-k 个 parent_id
-    3. 查询这 k 个父文档
-    4. 将所有父文档拼接在一起
-    5. 创建新的结果列表,添加父文档内容
-    6. 显式返回增强后的结果
+    核心逻辑:
+    1. 每个查询对独立处理,按 bfp_rerank_score 筛选
+    2. 只保留并增强高分结果,低分查询对直接跳过
+    3. 用父ID从Milvus召回父文档内容
+    4. 将父文档内容拼接到高分结果后
 
     Args:
         milvus_manager: MilvusManager 实例
-        bfp_result_lists: 检索结果列表 (不会被修改)
-        top_k: 提取前k个父文档ID (默认3个)
+        bfp_result_lists: 检索结果列表 (二维,每个子列表对应一个查询对)
+        score_threshold: bfp_rerank_score 最低阈值,低于此分数直接跳过 (默认0.5)
+        max_parents_per_pair: 每个查询对最多选取的父文档数量 (默认2个)
         max_parent_text_length: 单个父文档最大长度限制 (None=不限制)
 
     Returns:
-        Dict[str, Any]: 增强结果,包含:
-            - enhanced_results: 增强后的结果列表
+        Dict: 增强结果,包含:
+            - enhanced_results: 增强后的结果列表 (二维,只包含高分查询对)
             - enhanced_count: 成功增强的结果数量
-            - parent_docs: 使用的父文档列表
-            - combined_text: 拼接后的父文档文本
+            - parent_docs: 使用的所有父文档列表
+            - enhanced_pairs: 进行了增强的查询对数量
+            - total_pairs: 原始查询对总数
     """
-    # Step 1: 统计 parent_id 出现频率
-    parent_id_freq = {}
-    for result_list in bfp_result_lists:
+    enhanced_results = []
+    total_enhanced_count = 0
+    all_parent_docs = []
+    enhanced_pairs_count = 0
+
+    logger.info(f"[分组增强] 开始处理 {len(bfp_result_lists)} 个查询对,阈值={score_threshold}")
+
+    for pair_idx, result_list in enumerate(bfp_result_lists):
         if not result_list:
             continue
-        for result in result_list:
-            metadata = result.get('metadata', {})
-            parent_id = metadata.get('parent_id')
-            if parent_id:
-                parent_id = str(parent_id)
-                parent_id_freq[parent_id] = parent_id_freq.get(parent_id, 0) + 1
-
-    if not parent_id_freq:
-        logger.info("[父文档工具] 没有发现父文档ID")
-        return {
-            'enhanced_results': [],
-            'enhanced_count': 0,
-            'parent_docs': [],
-            'combined_text': ''
-        }
 
-    # Step 2: 按频率排序,取 top-k 个 parent_id
-    top_parent_items = sorted(
-        parent_id_freq.items(),
-        key=lambda x: x[1],
-        reverse=True
-    )[:top_k]
-
-    top_parent_ids = [pid for pid, freq in top_parent_items]
-    logger.info(f"[父文档工具] 提取 top-{len(top_parent_ids)} 父文档ID: {top_parent_ids}")
-    logger.debug(f"[父文档工具] 父文档频率: {dict(top_parent_items)}")
-
-    # Step 3: 批量查询父文档内容
-    parent_docs = []
-    for parent_id in top_parent_ids:
-        parent_doc = fetch_parent_document(
-            milvus_manager=milvus_manager,
-            parent_id=parent_id
-        )
+        # 1. 按分数排序并筛选高分结果
+        sorted_results = sorted(result_list, key=lambda x: x.get('bfp_rerank_score', 0), reverse=True)
+        high_score_results = [r for r in sorted_results if r.get('bfp_rerank_score', 0) >= score_threshold]
 
-        if parent_doc and parent_doc.get('text'):
-            parent_text = parent_doc['text']
+        if not high_score_results:
+            logger.info(f"[分组增强] 查询对 {pair_idx}: 所有结果分数均低于 {score_threshold},跳过")
+            continue
 
-            # 可选: 截断过长的父文档
-            if max_parent_text_length and len(parent_text) > max_parent_text_length:
-                parent_text = parent_text[:max_parent_text_length] + "\n...(内容过长已截断)"
-                logger.debug(f"[父文档工具] 父文档 {parent_id} 内容过长,已截断到 {max_parent_text_length} 字符")
+        # 2. 提取父ID(去重,限制数量)
+        parent_ids = list(set([
+            r.get('metadata', {}).get('parent_id')
+            for r in high_score_results[:max_parents_per_pair]
+            if r.get('metadata', {}).get('parent_id')
+        ]))
 
-            parent_docs.append({
-                'parent_id': parent_id,
-                'text': parent_text
-            })
-            logger.info(f"[父文档工具] 成功查询父文档 {parent_id}, 内容长度: {len(parent_text)}")
-        else:
-            logger.warning(f"[父文档工具] 父文档 {parent_id} 查询失败或内容为空")
-
-    if not parent_docs:
-        logger.warning("[父文档工具] 所有父文档查询均失败")
-        return {
-            'enhanced_results': [],
-            'enhanced_count': 0,
-            'parent_docs': [],
-            'combined_text': ''
-        }
+        if not parent_ids:
+            logger.warning(f"[分组增强] 查询对 {pair_idx}: 没有有效的parent_id,跳过")
+            continue
 
-    # Step 4: 将所有父文档拼接在一起
-    combined_parent_text = "\n".join([
-        f"【父文档 {i+1}】\n{doc['text']}"
-        for i, doc in enumerate(parent_docs)
-    ])
-    logger.info(f"[父文档工具] 拼接了 {len(parent_docs)} 个父文档, 总长度: {len(combined_parent_text)}")
+        # 3. 查询父文档内容
+        parent_docs = []
+        for pid in parent_ids:
+            doc = fetch_parent_document(milvus_manager, str(pid))
+            if doc and doc.get('text'):
+                text = doc['text']
+                # if max_parent_text_length and len(text) > max_parent_text_length:
+                #     text = text[:max_parent_text_length] + "\n...(已截断)"
+                parent_docs.append({'parent_id': pid, 'text': text})
+
+        if not parent_docs:
+            logger.warning(f"[分组增强] 查询对 {pair_idx}: 父文档查询失败,跳过")
+            continue
 
-    # Step 5: 创建新的增强结果列表
-    enhanced_results = []
-    enhanced_count = 0
+        # 4. 拼接父文档内容
+        combined_text = "\n".join([f"【参考文档 {i+1}】\n{d['text']}" for i, d in enumerate(parent_docs)])
 
-    for result_list in bfp_result_lists:
+        # 5. 只保留并增强高分结果
         enhanced_list = []
-        if not result_list:
-            enhanced_results.append(enhanced_list)
-            continue
-
         for result in result_list:
-            # 创建新的结果字典 (不修改原数据)
-            enhanced_result = {
-                'text_content': result.get('text_content', '') + f"\n{combined_parent_text}\n",
-                'metadata': result.get('metadata', {}),
-                'hybrid_similarity': result.get('hybrid_similarity'),
-                'rerank_score': result.get('rerank_score'),
-                'bfp_rerank_score': result.get('bfp_rerank_score'),
-                'bfp_rerank_parent_id': result.get('bfp_rerank_parent_id', '')
-            }
-            enhanced_list.append(enhanced_result)
-            enhanced_count += 1
-
-        enhanced_results.append(enhanced_list)
+            if result.get('bfp_rerank_score', 0) >= score_threshold:
+                enhanced_list.append({
+                    'text_content': result.get('text_content', '') + f"\n{combined_text}\n",
+                    'metadata': result.get('metadata', {}),
+                    'hybrid_similarity': result.get('hybrid_similarity'),
+                    'rerank_score': result.get('rerank_score'),
+                    'bfp_rerank_score': result.get('bfp_rerank_score'),
+                    'bfp_rerank_parent_id': result.get('bfp_rerank_parent_id', ''),
+                    'source_entity': result.get('source_entity', ''),
+                    'enhanced': True,
+                    'parent_docs_count': len(parent_docs)
+                })
+
+        if enhanced_list:
+            enhanced_results.append(enhanced_list)
+            all_parent_docs.extend(parent_docs)
+            enhanced_pairs_count += 1
+            total_enhanced_count += len(enhanced_list)
+            logger.info(f"[分组增强] 查询对 {pair_idx}: 保留 {len(enhanced_list)} 个高分结果")
 
-    logger.info(f"[父文档工具] 成功增强 {enhanced_count} 个结果")
+    logger.info(f"[分组增强] 完成: {enhanced_pairs_count}/{len(bfp_result_lists)} 个查询对,{total_enhanced_count} 个结果")
 
-    # Step 6: 显式返回增强结果
     return {
         'enhanced_results': enhanced_results,
-        'enhanced_count': enhanced_count,
-        'parent_docs': parent_docs,
-        'combined_text': combined_parent_text
+        'enhanced_count': total_enhanced_count,
+        'parent_docs': all_parent_docs,
+        'enhanced_pairs': enhanced_pairs_count,
+        'total_pairs': len(bfp_result_lists)
     }
 
 
@@ -317,3 +292,4 @@ def extract_query_pairs_results(bfp_result_lists: List, query_pairs: List[Dict]
     logger.info(f"[父文档工具] 提取完成: 总计 {total_count} 个查询对,{filtered_count} 个结果通过阈值过滤")
 
     return entity_results
+

+ 1 - 1
core/construction_review/component/reviewers/base_reviewer.py

@@ -36,7 +36,7 @@ class BaseReviewer(ABC):
         self.reference_source = ""
     
     #@obverse
-    async def review(self, name: str, trace_id: str, reviewer_type: str, prompt_name: str, review_content: str, review_references: str = None,
+    async def review(self, name: str, trace_id: str, reviewer_type: str, prompt_name: str, review_content: str, review_references: str = None,
                     reference_source: str = None, review_location_label: str = None,state:str =None,stage_name:str = None, timeout: int = 60) -> ReviewResult:
         """
         执行审查

+ 7 - 7
core/construction_review/component/reviewers/semantic_logic.py

@@ -112,7 +112,7 @@ class SemanticLogicReviewer:
             result = ReviewResult(
                 success=True,
                 details={
-                    "name": "semantic_check",
+                    "name": "semantic_logic_check",
                     "response": model_response
                 },
                 error_message=None,
@@ -122,7 +122,7 @@ class SemanticLogicReviewer:
             # 推送审查完成信息
             if state and state.get("progress_manager"):
                 review_result_data = {
-                    'name': 'semantic_check',
+                    'name': 'semantic_logic_check',
                     'success': result.success,
                     'details': result.details,
                     'error_message': result.error_message,
@@ -136,13 +136,13 @@ class SemanticLogicReviewer:
                         stage_name=stage_name,
                         current=None,
                         status="processing",
-                        message=f"semantic_check 审查完成,耗时: {result.execution_time:.2f}s",
+                        message=f"semantic_logic_check 审查完成,耗时: {result.execution_time:.2f}s",
                         issues=[review_result_data],
                         event_type="processing"
                     )
                 )
                 
-            logger.info(f"semantic_check 审查完成,耗时: {result.execution_time:.2f}s")
+            logger.info(f"semantic_logic_check 审查完成,耗时: {result.execution_time:.2f}s")
             
             return result
             
@@ -154,7 +154,7 @@ class SemanticLogicReviewer:
             # 返回失败结果
             result = ReviewResult(
                 success=False,
-                details={"name": "semantic_check"},
+                details={"name": "semantic_logic_check"},
                 error_message=error_msg,
                 execution_time=execution_time
             )
@@ -162,7 +162,7 @@ class SemanticLogicReviewer:
             # 推送失败信息
             if state and state.get("progress_manager"):
                 review_result_data = {
-                    'name': 'semantic_check',
+                    'name': 'semantic_logic_check',
                     'success': False,
                     'details': result.details,
                     'error_message': error_msg,
@@ -176,7 +176,7 @@ class SemanticLogicReviewer:
                         stage_name=stage_name,
                         current=None,
                         status="processing",
-                        message=f"semantic_check 审查失败: {error_msg}",
+                        message=f"semantic_logic_check 审查失败: {error_msg}",
                         issues=[review_result_data],
                         event_type="processing"
                     )

+ 8 - 12
core/construction_review/workflows/ai_review_workflow.py

@@ -153,7 +153,6 @@ class AIReviewWorkflow:
         workflow.add_node("start", self._start_node)
         workflow.add_node("initialize_progress", self._initialize_progress_node)
         workflow.add_node("ai_review", self._ai_review_node)
-        workflow.add_node("save_results", self._save_results_node)
         workflow.add_node("complete", self._complete_node)
         workflow.add_node("error_handler", self._error_handler_node)
         workflow.add_node("terminate", self._terminate_node)  # 新增终止节点
@@ -162,24 +161,21 @@ class AIReviewWorkflow:
         workflow.add_edge("start", "initialize_progress")
         workflow.add_edge("initialize_progress", "ai_review")
 
-        # 删除默认边,由条件边控制路由
-        # workflow.add_edge("ai_review", "save_results")
-        workflow.add_edge("save_results", "complete")
-        workflow.add_edge("complete", END)
-        workflow.add_edge("error_handler", END)
-        workflow.add_edge("terminate", END)  # 终止节点也到 END
-
-        # 添加条件边(错误处理 + 终止检查)- 替代默认边
+        # 添加条件边(错误处理 + 终止检查)
         workflow.add_conditional_edges(
             "ai_review",
             self._should_terminate_or_error,
             {
-                "terminate": "terminate",  # 新增终止路径
-                "success": "save_results",
-                "error": "error_handler"
+                "terminate": "terminate",  # 终止路径
+                "success": "complete",  # 成功后直接完成
+                "error": "error_handler"  # 错误处理
             }
         )
 
+        workflow.add_edge("complete", END)
+        workflow.add_edge("error_handler", END)
+        workflow.add_edge("terminate", END)
+
         self.graph = workflow.compile()
         self._get_workflow_graph()
 

+ 1 - 1
foundation/ai/rag/retrieval/entities_enhance.py

@@ -31,7 +31,7 @@ class EntitiesEnhance():
             entity = query_pair['entity']
             search_keywords = query_pair['search_keywords']
             background = query_pair['background']
-
+            server_logger.info(f"正在处理实体:{entity},辅助搜索词:{search_keywords},背景:{background}")
             entity_list = run_async(retrieval_manager.entity_recall(
                 entity,
                 search_keywords,

+ 23 - 10
utils_test/RAG_Test/rag_pipeline_web/rag_pipeline_server.py

@@ -21,7 +21,7 @@ sys.path.insert(0, project_root)
 
 from core.construction_review.component.infrastructure.milvus import MilvusConfig, MilvusManager
 from core.construction_review.component.infrastructure.parent_tool import (
-    enhance_with_parent_docs,
+    enhance_with_parent_docs_grouped,
     extract_query_pairs_results
 )
 from foundation.ai.rag.retrieval.entities_enhance import entity_enhance
@@ -132,37 +132,50 @@ def rag_enhanced_check(query_content: str) -> dict:
 
         return pipeline_data
 
-    # Step 3: 父文档增强 (使用独立工具函数 - 显式返回)
+    # Step 3: 父文档增强(使用分组增强策略 - 每个查询对独立处理 + 按分数筛选)
     step3_start = time.time()
     try:
-        enhancement_result = enhance_with_parent_docs(milvus_manager, bfp_result_lists)
+        enhancement_result = enhance_with_parent_docs_grouped(
+            milvus_manager,
+            bfp_result_lists,
+            score_threshold=0.5,  # bfp_rerank_score 阈值
+            max_parents_per_pair=3  # 每个查询对最多3个父文档
+        )
         enhanced_results = enhancement_result['enhanced_results']
         enhanced_count = enhancement_result['enhanced_count']
         parent_docs = enhancement_result['parent_docs']
+        enhanced_pairs = enhancement_result.get('enhanced_pairs', 0)
+        total_pairs = enhancement_result.get('total_pairs', 0)
 
         # 保存增强后的结果
-        with open(os.path.join(project_root, "temp", "rag_pipeline_server", "enhance_with_parent_docs.json"), "w", encoding='utf-8') as f:
+        with open(os.path.join(project_root, "temp", "rag_pipeline_server", "enhance_with_parent_docs_grouped.json"), "w", encoding='utf-8') as f:
             json.dump(enhanced_results, f, ensure_ascii=False, indent=4)
 
-        logger.info(f"[RAG增强] 成功增强 {enhanced_count} 个结果")
-        logger.info(f"[RAG增强] 使用了 {len(parent_docs)} 个父文档")
+        logger.info(f"[RAG增强] 分组增强完成: {enhanced_pairs}/{total_pairs} 个查询对进行了增强")
+        logger.info(f"[RAG增强] 成功增强 {enhanced_count} 个结果,使用了 {len(parent_docs)} 个父文档")
 
         pipeline_data["steps"]["3_parent_doc_enhancement"] = {
-            "name": "父文档增强",
+            "name": "父文档增强(分组策略)",
             "execution_time": round(time.time() - step3_start, 3),
-            "input": {"bfp_results_count": len(bfp_result_lists)},
+            "input": {
+                "bfp_results_count": len(bfp_result_lists),
+                "score_threshold": 0.5,
+                "max_parents_per_pair": 3
+            },
             "output": {
                 "enhanced_count": enhanced_count,
                 "parent_docs_count": len(parent_docs),
+                "enhanced_pairs": enhanced_pairs,
+                "total_pairs": total_pairs,
                 "parent_docs": parent_docs,
                 "enhanced_results": enhanced_results
             }
         }
     except Exception as e:
-        logger.error(f"[RAG增强] 父文档增强失败: {e}")
+        logger.error(f"[RAG增强] 父文档增强失败: {e}", exc_info=True)
         enhanced_results = bfp_result_lists
         pipeline_data["steps"]["3_parent_doc_enhancement"] = {
-            "name": "父文档增强",
+            "name": "父文档增强(分组策略)",
             "execution_time": round(time.time() - step3_start, 3),
             "input": {"bfp_results_count": len(bfp_result_lists)},
             "output": {"error": str(e), "enhanced_results": enhanced_results}

+ 3 - 3
utils_test/Semantic_Logic_Test/SUMMARY.md

@@ -127,7 +127,7 @@ prompt_template = prompt_loader.get_prompt_template(
 # 推送审查完成信息
 if state and state.get("progress_manager"):
     review_result_data = {
-        'name': 'semantic_check',
+        'name': 'semantic_logic_check',
         'success': result.success,
         'details': result.details,
         'error_message': result.error_message,
@@ -141,7 +141,7 @@ if state and state.get("progress_manager"):
             stage_name=stage_name,
             current=None,
             status="processing",
-            message=f"semantic_check 审查完成,耗时: {result.execution_time:.2f}s",
+            message=f"semantic_logic_check 审查完成,耗时: {result.execution_time:.2f}s",
             issues=[review_result_data],
             event_type="processing"
         )
@@ -154,7 +154,7 @@ if state and state.get("progress_manager"):
 result = ReviewResult(
     success=True,
     details={
-        "name": "semantic_check",
+        "name": "semantic_logic_check",
         "response": model_response
     },
     error_message=None,

+ 3 - 3
utils_test/Semantic_Logic_Test/test_semantic_logic.py

@@ -115,7 +115,7 @@ class TestSemanticLogicReviewer:
             # 验证结果
             assert isinstance(result, ReviewResult)
             assert result.success is True
-            assert result.details["name"] == "semantic_check"
+            assert result.details["name"] == "semantic_logic_check"
             assert "审查结果" in result.details["response"]
             assert result.error_message is None
             assert result.execution_time is not None
@@ -162,7 +162,7 @@ class TestSemanticLogicReviewer:
             )
             
             assert result.success is True
-            assert result.details["name"] == "semantic_check"
+            assert result.details["name"] == "semantic_logic_check"
     
     @pytest.mark.asyncio
     async def test_check_semantic_logic_api_error(
@@ -232,7 +232,7 @@ class TestSemanticLogicReviewer:
             )
             
             assert result.success is True
-            assert result.details["name"] == "semantic_check"
+            assert result.details["name"] == "semantic_logic_check"
     
     @pytest.mark.asyncio
     async def test_check_semantic_logic_with_references(