Преглед изворни кода

Merge branch 'dev_sgsc_lpl' of CRBC-MaaS-Platform-Project/LQAgentPlatform into dev

fix(sgsc-时效性审查模型-xth): 整合时效性审查功能到主分支
LingMin пре 1 недеља
родитељ
комит
7ed0ee9c1f

+ 4 - 1
config/config.ini.template

@@ -132,11 +132,14 @@ LQ_QWEN3_8B_LQ_LORA_API_KEY=dummy
 MYSQL_HOST=192.168.92.61
 MYSQL_HOST=192.168.92.61
 MYSQL_PORT=13306
 MYSQL_PORT=13306
 MYSQL_USER=root
 MYSQL_USER=root
-MYSQL_PASSWORD=lq@123
+MYSQL_PASSWORD=Lq123456!
 MYSQL_DB=lq_db
 MYSQL_DB=lq_db
 MYSQL_MIN_SIZE=1
 MYSQL_MIN_SIZE=1
 MYSQL_MAX_SIZE=5
 MYSQL_MAX_SIZE=5
 MYSQL_AUTO_COMMIT=True
 MYSQL_AUTO_COMMIT=True
+MYSQL_CONNECT_TIMEOUT=30
+MYSQL_READ_TIMEOUT=60
+MYSQL_WRITE_TIMEOUT=30
 
 
 
 
 [pgvector]
 [pgvector]

+ 9 - 1
core/base/workflow_manager.py

@@ -664,13 +664,21 @@ class WorkflowManager:
 
 
             logger.info(f"AI审查配置: 最大审查数量={max_review_units}, 审查模式={review_mode}")
             logger.info(f"AI审查配置: 最大审查数量={max_review_units}, 审查模式={review_mode}")
 
 
+            # [新增] 初始化数据库连接池(用于时效性审查等新逻辑)
+            # Mock模式已取消:数据库连接失败时将抛出异常,不会静默使用Mock数据
+            from foundation.infrastructure.mysql.async_mysql_conn_pool import AsyncMySQLPool
+            db_pool = AsyncMySQLPool()
+            await db_pool.initialize()
+            logger.info("数据库连接池初始化成功")
+
             # 创建AI审查工作流实例(作为嵌套子图)
             # 创建AI审查工作流实例(作为嵌套子图)
             ai_workflow = AIReviewWorkflow(
             ai_workflow = AIReviewWorkflow(
                 task_file_info=task_file_info,
                 task_file_info=task_file_info,
                 structured_content=structured_content,
                 structured_content=structured_content,
                 progress_manager=state["progress_manager"],
                 progress_manager=state["progress_manager"],
                 max_review_units=max_review_units,
                 max_review_units=max_review_units,
-                review_mode=review_mode
+                review_mode=review_mode,
+                db_pool=db_pool
             )
             )
 
 
             # 执行AI审查(内部使用 LangGraph)
             # 执行AI审查(内部使用 LangGraph)

+ 6 - 3
core/construction_review/component/ai_review_engine.py

@@ -128,13 +128,14 @@ class Stage(Enum):
 class AIReviewEngine(BaseReviewer):
 class AIReviewEngine(BaseReviewer):
     """AI审查引擎 - 支持审查条目并发"""
     """AI审查引擎 - 支持审查条目并发"""
 
 
-    def __init__(self, task_file_info: TaskFileInfo = None, max_concurrent_reviews: int = 8):
+    def __init__(self, task_file_info: TaskFileInfo = None, max_concurrent_reviews: int = 8, db_pool=None):
         """
         """
         初始化AI审查引擎
         初始化AI审查引擎
 
 
         Args:
         Args:
             task_file_info: TaskFileInfo 实例,包含任务相关信息
             task_file_info: TaskFileInfo 实例,包含任务相关信息
             max_concurrent_reviews: 最大并发审查数量
             max_concurrent_reviews: 最大并发审查数量
+            db_pool: 数据库连接池(用于时效性审查等新逻辑)
         """
         """
         super().__init__()
         super().__init__()
 
 
@@ -152,6 +153,8 @@ class AIReviewEngine(BaseReviewer):
         self.semaphore = asyncio.Semaphore(max_concurrent_reviews)
         self.semaphore = asyncio.Semaphore(max_concurrent_reviews)
         self.milvus_collection = config_handler.get('milvus', 'MILVUS_COLLECTION', 'default')
         self.milvus_collection = config_handler.get('milvus', 'MILVUS_COLLECTION', 'default')
 
 
+        # [新增] 数据库连接池
+        self.db_pool = db_pool
 
 
         self.milvus = MilvusManager(MilvusConfig())
         self.milvus = MilvusManager(MilvusConfig())
         self.redis_client = get_redis_connection()   # 获取Redis连接
         self.redis_client = get_redis_connection()   # 获取Redis连接
@@ -1188,7 +1191,7 @@ class AIReviewEngine(BaseReviewer):
 
 
                     # 调用内容时效性审查器
                     # 调用内容时效性审查器
                     from core.construction_review.component.reviewers.timeliness_content_reviewer import ContentTimelinessReviewer
                     from core.construction_review.component.reviewers.timeliness_content_reviewer import ContentTimelinessReviewer
-                    async with ContentTimelinessReviewer(max_concurrent=max_concurrent) as reviewer:
+                    async with ContentTimelinessReviewer(max_concurrent=max_concurrent, db_pool=self.db_pool) as reviewer:
                         timeliness_content_results = await reviewer.review_tertiary_content(
                         timeliness_content_results = await reviewer.review_tertiary_content(
                             tertiary_details=tertiary_details,
                             tertiary_details=tertiary_details,
                             collection_name="first_bfp_collection_status",
                             collection_name="first_bfp_collection_status",
@@ -1306,7 +1309,7 @@ class AIReviewEngine(BaseReviewer):
 
 
                     # 调用带有SSE推送功能的review_all方法
                     # 调用带有SSE推送功能的review_all方法
                     from core.construction_review.component.reviewers.timeliness_basis_reviewer import BasisReviewService
                     from core.construction_review.component.reviewers.timeliness_basis_reviewer import BasisReviewService
-                    async with BasisReviewService(max_concurrent=max_concurrent) as service:
+                    async with BasisReviewService(max_concurrent=max_concurrent, db_pool=self.db_pool) as service:
                         timeliness_basis_review_results = await service.review_all(
                         timeliness_basis_review_results = await service.review_all(
                             basis_items,
                             basis_items,
                             collection_name="first_bfp_collection_status",
                             collection_name="first_bfp_collection_status",

+ 64 - 0
core/construction_review/component/doc_worker/pdf_worker/fulltext_extractor.py

@@ -38,6 +38,8 @@ class PdfFullTextExtractor(FullTextExtractor):
                 page = doc[page_num]
                 page = doc[page_num]
                 # # 提取文本,表格部分用 <表格></表格> 标签替换
                 # # 提取文本,表格部分用 <表格></表格> 标签替换
                 text = self._extract_text_with_table_placeholders(page)
                 text = self._extract_text_with_table_placeholders(page)
+                # 清理 PyMuPDF 添加的不必要空格
+                text = self._clean_extracted_text(text)
                 # 过滤页眉页脚
                 # 过滤页眉页脚
                 text = self._filter_header_footer(text)
                 text = self._filter_header_footer(text)
                 pages.append(
                 pages.append(
@@ -63,6 +65,68 @@ class PdfFullTextExtractor(FullTextExtractor):
 
 
         return pages
         return pages
 
 
+    def _clean_extracted_text(self, text: str) -> str:
+        """
+        清理提取的文本,移除 PyMuPDF 添加的不必要空格
+
+        问题:PyMuPDF 在提取 PDF 文本时,有时会在中文字符和数字/标点之间
+        添加不必要的空格(如 "(国务院令第279 号)" 变成 "(国务院令第279 号)")
+
+        处理规则:
+        1. 移除中文和数字之间的空格:第279 号 -> 第279号
+        2. 移除中文和中文标点之间的空格
+        3. 保留英文单词之间的空格
+        4. 保留换行符
+
+        Args:
+            text: 原始提取的文本
+
+        Returns:
+            清理后的文本
+        """
+        import re
+
+        if not text:
+            return text
+
+        # 定义中文字符范围(包括中文标点)
+        chinese_char = r'[\u4e00-\u9fff]'
+        chinese_punctuation = r'[\u3000-\u303f\uff00-\uffef]'
+        digit = r'[0-9]'
+        ascii_letter = r'[a-zA-Z]'
+
+        # 规则1: 中文数字 + 空格 + 数字中文 -> 移除空格
+        # 例:第279 号 -> 第279号,令 第 -> 令第
+        text = re.sub(r'(' + chinese_char + r') +(' + digit + r')', r'\1\2', text)
+        text = re.sub(r'(' + digit + r') +(' + chinese_char + r')', r'\1\2', text)
+
+        # 规则2: 中文 + 空格 + 中文标点 -> 移除空格
+        text = re.sub(r'(' + chinese_char + r') +(' + chinese_punctuation + r')', r'\1\2', text)
+        text = re.sub(r'(' + chinese_punctuation + r') +(' + chinese_char + r')', r'\1\2', text)
+
+        # 规则3: 连续中文之间的空格 -> 移除
+        text = re.sub(r'(' + chinese_char + r') +(' + chinese_char + r')', r'\1\2', text)
+
+        # 规则4: 括号内的数字空格处理
+        # 例:(279 号) -> (279号),[123 号] -> [123号]
+        text = re.sub(r'\((' + digit + r'+) +(' + chinese_char + r'+)\)', r'(\1\2)', text)
+        text = re.sub(r'((' + digit + r'+) +(' + chinese_char + r'+))', r'(\1\2)', text)
+        text = re.sub(r'\[(' + digit + r'+) +(' + chinese_char + r'+)\]', r'[\1\2]', text)
+
+        # 规则5: 处理编号格式中的空格,如 "GB 51-2001" 保持,但 "GB51 -2001" 修复
+        # 保留标准编号格式中的空格,但修复不合理的空格
+
+        # 规则6: 循环清理中文之间的多个连续空格
+        # 对于"建 设 工 程"这种情况,需要多次应用正则
+        max_iterations = 10  # 防止无限循环
+        for _ in range(max_iterations):
+            prev_text = text
+            text = re.sub(r'(' + chinese_char + r') +(' + chinese_char + r')', r'\1\2', text)
+            if text == prev_text:
+                break
+
+        return text
+
     def _filter_header_footer(self, text: str) -> str:
     def _filter_header_footer(self, text: str) -> str:
         """
         """
         过滤页眉页脚
         过滤页眉页脚

+ 5 - 1
core/construction_review/component/doc_worker/pdf_worker/hybrid_extractor.py

@@ -304,11 +304,15 @@ class HybridFullTextExtractor(FullTextExtractor):
                     except Exception as e:
                     except Exception as e:
                         logger.error(f"    {ocr_name} 失败,回退到本地提取: {e}")
                         logger.error(f"    {ocr_name} 失败,回退到本地提取: {e}")
                         raw_text = page.get_text()
                         raw_text = page.get_text()
+                        # 清理空格后过滤页眉页脚
+                        raw_text = self.local_extractor._clean_extracted_text(raw_text)
                         page_text = self.local_extractor._filter_header_footer(raw_text)
                         page_text = self.local_extractor._filter_header_footer(raw_text)
                 else:
                 else:
                     logger.debug(f"  [第 {page_num} 页] 无 table -> 走本地 PyMuPDF 提取")
                     logger.debug(f"  [第 {page_num} 页] 无 table -> 走本地 PyMuPDF 提取")
-                    
+
                     text_with_tables = self.local_extractor._extract_text_with_table_placeholders(page)
                     text_with_tables = self.local_extractor._extract_text_with_table_placeholders(page)
+                    # 清理空格后过滤页眉页脚
+                    text_with_tables = self.local_extractor._clean_extracted_text(text_with_tables)
                     page_text = self.local_extractor._filter_header_footer(text_with_tables)
                     page_text = self.local_extractor._filter_header_footer(text_with_tables)
 
 
                 # 组装结果
                 # 组装结果

+ 13 - 0
core/construction_review/component/reviewers/__init__.py

@@ -16,6 +16,14 @@ from .completeness_reviewer import (
     result_to_dict,
     result_to_dict,
 )
 )
 
 
+# 标准时效性审查(基于内存匹配规则,无LLM)
+from .standard_timeliness_reviewer import (
+    StandardTimelinessReviewer,
+    TimelinessReviewResult,
+    review_standards_timeliness,
+    review_standard_timeliness_with_standardized_output,
+)
+
 __all__ = [
 __all__ = [
     'BaseReviewer',
     'BaseReviewer',
     # 轻量级完整性审查
     # 轻量级完整性审查
@@ -26,4 +34,9 @@ __all__ = [
     'LightweightCompletenessResult',
     'LightweightCompletenessResult',
     'check_completeness_lightweight',
     'check_completeness_lightweight',
     'result_to_dict',
     'result_to_dict',
+    # 标准时效性审查
+    'StandardTimelinessReviewer',
+    'TimelinessReviewResult',
+    'review_standards_timeliness',
+    'review_standard_timeliness_with_standardized_output',
 ]
 ]

+ 84 - 108
core/construction_review/component/reviewers/completeness_reviewer.py

@@ -328,21 +328,20 @@ class LightweightCompletenessChecker:
             context = "【问题类型】未知"
             context = "【问题类型】未知"
             reference = ""
             reference = ""
 
 
-        prompt = f"""你是一位资深的工程施工方案审查专家。请根据以下问题上下文和规范参考信息,生成专业的审查建议。
+        prompt = f"""你是一位资深的工程施工方案审查专家。请根据以下问题上下文和规范参考信息,生成专业的补充建议。
 
 
 {context}
 {context}
 
 
 {reference}
 {reference}
 
 
-请用JSON格式输出审查建议,包含以下字段:
-- issue_point: 问题摘要(简洁明了,50字以内)
+请用JSON格式输出,只包含以下字段:
 - suggestion: 具体补充建议(详细可行,100-200字,包含具体应该补充的内容要点)
 - suggestion: 具体补充建议(详细可行,100-200字,包含具体应该补充的内容要点)
-- reason: 规范依据说明(引用具体规范要求,说明为什么需要补充)
 
 
 注意:
 注意:
 1. suggestion应该具体、可操作,引用规范中的具体内容要求
 1. suggestion应该具体、可操作,引用规范中的具体内容要求
 2. 使用专业的工程术语
 2. 使用专业的工程术语
 3. 语气应该是指导性的,帮助编制人员理解需要补充什么内容
 3. 语气应该是指导性的,帮助编制人员理解需要补充什么内容
+4. **必须符合现实逻辑**:建议内容应基于实际工程施工的可行性,不能提出不切实际、无法操作或与工程常识相悖的建议
 
 
 JSON输出:"""
 JSON输出:"""
         return prompt
         return prompt
@@ -361,8 +360,10 @@ JSON输出:"""
         """
         """
         使用大模型生成建议
         使用大模型生成建议
 
 
+        【修改】只生成 suggestion,issue_point 和 reason 由调用方简单拼接
+
         Returns:
         Returns:
-            Dict[str, str]: 包含 issue_point, suggestion, reason 的字典
+            Dict[str, str]: 包含 suggestion 的字典,或None使用回退逻辑
         """
         """
         if not self.model_client:
         if not self.model_client:
             return None
             return None
@@ -392,14 +393,12 @@ JSON输出:"""
                 trace_id=trace_id,
                 trace_id=trace_id,
                 task_prompt_info=task_prompt_info,
                 task_prompt_info=task_prompt_info,
                 timeout=timeout,
                 timeout=timeout,
-                model_name="qwen"  # 使用默认模型,可根据需要调整
+                model_name="qwen"
             )
             )
 
 
             # 解析模型返回的JSON
             # 解析模型返回的JSON
             try:
             try:
-                # 尝试从返回文本中提取JSON
                 response_text = model_response.strip()
                 response_text = model_response.strip()
-                # 查找JSON块
                 if "```json" in response_text:
                 if "```json" in response_text:
                     json_str = response_text.split("```json")[1].split("```")[0].strip()
                     json_str = response_text.split("```json")[1].split("```")[0].strip()
                 elif "```" in response_text:
                 elif "```" in response_text:
@@ -408,13 +407,12 @@ JSON输出:"""
                     json_str = response_text
                     json_str = response_text
 
 
                 result = json.loads(json_str)
                 result = json.loads(json_str)
+                # 只返回 suggestion,issue_point 和 reason 由调用方处理
                 return {
                 return {
-                    "issue_point": result.get("issue_point", ""),
-                    "suggestion": result.get("suggestion", ""),
-                    "reason": result.get("reason", "")
+                    "suggestion": result.get("suggestion", "")
                 }
                 }
             except (json.JSONDecodeError, IndexError) as e:
             except (json.JSONDecodeError, IndexError) as e:
-                logger.warning(f"LLM建议生成结果解析失败: {e},返回: {model_response[:200]}")
+                logger.warning(f"LLM建议生成结果解析失败: {e}")
                 return None
                 return None
 
 
         except Exception as e:
         except Exception as e:
@@ -898,7 +896,11 @@ JSON输出:"""
 
 
             # ── 一级缺失 ──────────────────────────────────────────────
             # ── 一级缺失 ──────────────────────────────────────────────
             if first_code not in actual_first:
             if first_code not in actual_first:
-                # 尝试使用LLM生成建议
+                # issue_point 和 reason 使用简单拼接
+                issue_point = f"【一级章节缺失】'{first_name}'整个章节不存在"
+                reason = f"依据《桥梁公司危险性较大工程管理实施细则(2025版)》规定,文档必须包含'{first_name}'一级章节,当前正文中未发现该章节任何内容"
+
+                # 尝试使用LLM生成 suggestion
                 llm_result = await self._generate_recommendation_with_llm(
                 llm_result = await self._generate_recommendation_with_llm(
                     level="一级",
                     level="一级",
                     first_code=first_code,
                     first_code=first_code,
@@ -906,28 +908,20 @@ JSON输出:"""
                     first_seq=first_seq
                     first_seq=first_seq
                 )
                 )
 
 
-                if llm_result:
-                    recommendations.append({
-                        "level": "一级",
-                        "issue_point": llm_result.get("issue_point", f"【一级章节缺失】'{first_name}'整个章节不存在"),
-                        "location": first_name,
-                        "suggestion": llm_result.get("suggestion", f"请添加'{first_name}'章节及其下全部子章节内容"),
-                        "reason": llm_result.get("reason", f"根据规范要求,文档必须包含'{first_name}'一级章节,当前正文中未发现该章节任何内容"),
-                        "first_seq": first_seq,
-                    })
+                if llm_result and llm_result.get("suggestion"):
+                    suggestion = llm_result.get("suggestion")
                 else:
                 else:
                     # 回退到简单拼接
                     # 回退到简单拼接
-                    recommendations.append({
-                        "level": "一级",
-                        "issue_point": f"【一级章节缺失】'{first_name}'整个章节不存在",
-                        "location": first_name,
-                        "suggestion": f"请添加'{first_name}'章节及其下全部子章节内容",
-                        "reason": (
-                            f"根据规范要求,文档必须包含'{first_name}'一级章节,"
-                            f"当前正文中未发现该章节任何内容"
-                        ),
-                        "first_seq": first_seq,
-                    })
+                    suggestion = f"请添加'{first_name}'章节及其下全部子章节内容"
+
+                recommendations.append({
+                    "level": "一级",
+                    "issue_point": issue_point,
+                    "location": first_name,
+                    "suggestion": suggestion,
+                    "reason": reason,
+                    "first_seq": first_seq,
+                })
                 continue
                 continue
 
 
             # ── 一级存在,检查二级 ─────────────────────────────────────
             # ── 一级存在,检查二级 ─────────────────────────────────────
@@ -941,7 +935,11 @@ JSON输出:"""
 
 
                 # ── 二级缺失 ──────────────────────────────────────────
                 # ── 二级缺失 ──────────────────────────────────────────
                 if (cat1, cat2) not in actual_secondary:
                 if (cat1, cat2) not in actual_secondary:
-                    # 尝试使用LLM生成建议
+                    # issue_point 和 reason 使用简单拼接
+                    issue_point = f"【二级章节缺失】{first_name} > '{second_name}'整个章节不存在"
+                    reason = f"依据《桥梁公司危险性较大工程管理实施细则(2025版)》规定,'{first_name}'下应包含'{second_name}'二级章节,当前正文中未发现该章节内容"
+
+                    # 尝试使用LLM生成 suggestion
                     llm_result = await self._generate_recommendation_with_llm(
                     llm_result = await self._generate_recommendation_with_llm(
                         level="二级",
                         level="二级",
                         first_code=cat1,
                         first_code=cat1,
@@ -950,32 +948,21 @@ JSON输出:"""
                         second_name=second_name
                         second_name=second_name
                     )
                     )
 
 
-                    if llm_result:
-                        recommendations.append({
-                            "level": "二级",
-                            "issue_point": llm_result.get("issue_point", f"【二级章节缺失】{first_name} > '{second_name}'整个章节不存在"),
-                            "location": f"{first_name} > {second_name}",
-                            "suggestion": llm_result.get("suggestion", f"请在'{first_name}'下添加'{second_name}'章节内容"),
-                            "reason": llm_result.get("reason", f"根据规范要求,'{first_name}'下应包含'{second_name}'二级章节,当前正文中未发现该章节内容"),
-                            "first_seq": first_seq,
-                            "second_seq": second_seq,
-                        })
+                    if llm_result and llm_result.get("suggestion"):
+                        suggestion = llm_result.get("suggestion")
                     else:
                     else:
                         # 回退到简单拼接
                         # 回退到简单拼接
-                        recommendations.append({
-                            "level": "二级",
-                            "issue_point": (
-                                f"【二级章节缺失】{first_name} > '{second_name}'整个章节不存在"
-                            ),
-                            "location": f"{first_name} > {second_name}",
-                            "suggestion": f"请在'{first_name}'下添加'{second_name}'章节内容",
-                            "reason": (
-                                f"根据规范要求,'{first_name}'下应包含'{second_name}'二级章节,"
-                                f"当前正文中未发现该章节内容"
-                            ),
-                            "first_seq": first_seq,
-                            "second_seq": second_seq,
-                        })
+                        suggestion = f"请在'{first_name}'下添加'{second_name}'章节内容"
+
+                    recommendations.append({
+                        "level": "二级",
+                        "issue_point": issue_point,
+                        "location": f"{first_name} > {second_name}",
+                        "suggestion": suggestion,
+                        "reason": reason,
+                        "first_seq": first_seq,
+                        "second_seq": second_seq,
+                    })
                     continue
                     continue
 
 
                 # ── 二级存在,检查三级缺失 ────────────────────────────
                 # ── 二级存在,检查三级缺失 ────────────────────────────
@@ -993,7 +980,8 @@ JSON输出:"""
                 if not missing_t_items:
                 if not missing_t_items:
                     continue
                     continue
 
 
-                # 尝试使用LLM批量生成三级缺失建议
+                # issue_point 和 reason 使用简单拼接(三级缺失)
+                # 尝试使用LLM批量生成 suggestion
                 llm_result = await self._generate_recommendation_with_llm(
                 llm_result = await self._generate_recommendation_with_llm(
                     level="三级",
                     level="三级",
                     first_code=cat1,
                     first_code=cat1,
@@ -1003,34 +991,26 @@ JSON输出:"""
                     tertiary_items=missing_t_items
                     tertiary_items=missing_t_items
                 )
                 )
 
 
-                if llm_result:
-                    # LLM生成了整体建议,为每个缺失项添加相同建议(但位置不同)
-                    for t_item in missing_t_items:
-                        recommendations.append({
-                            "level": "三级",
-                            "issue_point": f"【三级内容缺失】{first_name} > {second_name} > '{t_item.third_cn}'",
-                            "location": f"{first_name} > {second_name}",
-                            "suggestion": llm_result.get("suggestion", f"请补充'{second_name}'下的'{t_item.third_cn}'内容"),
-                            "reason": llm_result.get("reason", f"'{second_name}'下缺失规范要求的'{t_item.third_cn}'内容要点"),
-                            "first_seq": first_seq,
-                            "second_seq": second_seq,
-                            "third_seq": t_item.third_seq,
-                        })
+                if llm_result and llm_result.get("suggestion"):
+                    suggestion = llm_result.get("suggestion")
                 else:
                 else:
-                    # 回退到简单拼接
-                    for t_item in missing_t_items:
-                        recommendations.append({
-                            "level": "三级",
-                            "issue_point": (
-                                f"【三级内容缺失】{first_name} > {second_name} > '{t_item.third_cn}'"
-                            ),
-                            "location": f"{first_name} > {second_name}",
-                            "suggestion": f"请补充'{second_name}'下的'{t_item.third_cn}'内容",
-                            "reason": f"'{second_name}'下缺失规范要求的'{t_item.third_cn}'内容要点",
-                            "first_seq": first_seq,
-                            "second_seq": second_seq,
-                            "third_seq": t_item.third_seq,
-                        })
+                    # 回退到简单拼接:列出所有缺失项
+                    missing_names = "、".join([t.third_cn for t in missing_t_items[:5]])
+                    if len(missing_t_items) > 5:
+                        missing_names += f"等{len(missing_t_items)}项内容"
+                    suggestion = f"请补充'{second_name}'下的{missing_names}"
+
+                for t_item in missing_t_items:
+                    recommendations.append({
+                        "level": "三级",
+                        "issue_point": f"【三级内容缺失】{first_name} > {second_name} > '{t_item.third_cn}'",
+                        "location": f"{first_name} > {second_name}",
+                        "suggestion": suggestion,
+                        "reason": f"依据《桥梁公司危险性较大工程管理实施细则(2025版)》规定,'{second_name}'下应包含'{t_item.third_cn}'内容要点",
+                        "first_seq": first_seq,
+                        "second_seq": second_seq,
+                        "third_seq": t_item.third_seq,
+                    })
 
 
         # ── 一致性审查:目录有列但正文无内容 ─────────────────────────────
         # ── 一致性审查:目录有列但正文无内容 ─────────────────────────────
         if outline_result:
         if outline_result:
@@ -1039,7 +1019,11 @@ JSON输出:"""
                 sec_title = e.get("outline_title") or e.get("secondary_name", "")
                 sec_title = e.get("outline_title") or e.get("secondary_name", "")
                 location = f"{f_name} > {sec_title}" if f_name else sec_title
                 location = f"{f_name} > {sec_title}" if f_name else sec_title
 
 
-                # 尝试使用LLM生成建议
+                # issue_point 和 reason 使用简单拼接(一致性审查)
+                issue_point = f"【目录正文不一致】'{location}'目录已列但正文无内容"
+                reason = f"依据《桥梁公司危险性较大工程管理实施细则(2025版)》规定,目录应与正文保持一致。目录页列有'{sec_title}'章节,但正文中未发现对应内容"
+
+                # 尝试使用LLM生成 suggestion
                 llm_result = await self._generate_recommendation_with_llm(
                 llm_result = await self._generate_recommendation_with_llm(
                     level="一致性",
                     level="一致性",
                     first_code="",
                     first_code="",
@@ -1048,27 +1032,19 @@ JSON输出:"""
                     outline_title=sec_title
                     outline_title=sec_title
                 )
                 )
 
 
-                if llm_result:
-                    recommendations.append({
-                        "level": "一致性",
-                        "issue_point": llm_result.get("issue_point", f"【目录正文不一致】'{location}'目录已列但正文无内容"),
-                        "location": location,
-                        "suggestion": llm_result.get("suggestion", f"请补充'{sec_title}'章节的正文内容,或从目录中移除该章节"),
-                        "reason": llm_result.get("reason", f"目录页列有'{sec_title}'章节,但正文中未发现对应内容,存在目录与正文不一致的问题"),
-                    })
+                if llm_result and llm_result.get("suggestion"):
+                    suggestion = llm_result.get("suggestion")
                 else:
                 else:
-                    recommendations.append({
-                        "level": "一致性",
-                        "issue_point": f"【目录正文不一致】'{location}'目录已列但正文无内容",
-                        "location": location,
-                        "suggestion": (
-                            f"请补充'{sec_title}'章节的正文内容,或从目录中移除该章节"
-                        ),
-                        "reason": (
-                            f"目录页列有'{sec_title}'章节,但正文中未发现对应内容,"
-                            f"存在目录与正文不一致的问题"
-                        ),
-                    })
+                    # 回退到简单拼接
+                    suggestion = f"请补充'{sec_title}'章节的正文内容,或从目录中移除该章节"
+
+                recommendations.append({
+                    "level": "一致性",
+                    "issue_point": issue_point,
+                    "location": location,
+                    "suggestion": suggestion,
+                    "reason": reason,
+                })
 
 
         if not recommendations:
         if not recommendations:
             recommendations.append({
             recommendations.append({
@@ -1076,7 +1052,7 @@ JSON输出:"""
                 "issue_point": "文档完整性良好",
                 "issue_point": "文档完整性良好",
                 "location": "",
                 "location": "",
                 "suggestion": "无需补充",
                 "suggestion": "无需补充",
-                "reason": "文档已覆盖规范要求的所有章节与内容要点",
+                "reason": "依据《桥梁公司危险性较大工程管理实施细则(2025版)》规定,文档已覆盖所有章节与内容要点",
             })
             })
 
 
         return recommendations
         return recommendations

+ 361 - 0
core/construction_review/component/reviewers/standard_timeliness_reviewer.py

@@ -0,0 +1,361 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+标准时效性审查器 - 基于内存匹配规则
+
+使用 StandardMatchingService 替代原有的向量搜索+LLM判断方式,
+提供更快速、准确的标准时效性审查功能。
+
+使用示例:
+    # 方法1: 使用便捷函数
+    from foundation.infrastructure.mysql.async_mysql_conn_pool import AsyncMySQLPool
+
+    db_pool = AsyncMySQLPool()
+    await db_pool.initialize()
+
+    results = await review_standards_timeliness(
+        standards_list=[
+            {"standard_name": "铁路桥涵设计规范", "standard_number": "TB 10002-2017"},
+            {"standard_name": "起重机 钢丝绳 保养、维护、检验和报废", "standard_number": "GB/T 5972-2016"},
+        ],
+        db_pool=db_pool
+    )
+
+    # 方法2: 使用异步上下文管理器
+    async with StandardTimelinessReviewer(db_pool=db_pool) as reviewer:
+        results = reviewer.review_standards(standards_list)
+"""
+import asyncio
+from typing import List, Dict, Any, Optional
+from dataclasses import dataclass, asdict
+
+from foundation.observability.logger.loggering import review_logger as logger
+from core.construction_review.component.standard_matching import (
+    StandardMatchingService,
+    StandardMatchResult,
+    MatchResultCode,
+)
+
+
@dataclass
class TimelinessReviewResult:
    """Result of a timeliness review for a single cited standard.

    Produced by StandardTimelinessReviewer from a StandardMatchResult; a plain
    data object that can be serialized via to_dict().
    """
    seq_no: int                              # sequence number within the reviewed batch
    standard_name: str                       # standard name as cited in the source document
    standard_number: str                     # standard number as cited in the source document
    process_result: str                      # human-readable processing outcome from the matcher
    status_code: str                         # match status code (values of MatchResultCode)
    has_issue: bool                          # whether a timeliness problem was found
    issue_type: Optional[str] = None         # category of the problem, when has_issue is True
    suggestion: Optional[str] = None         # suggested remediation, when has_issue is True
    reason: Optional[str] = None             # explanation of the problem, when has_issue is True
    risk_level: str = "low"                  # risk level; "low"/"high" to match legacy logic ("medium" only for unknown status codes)
    replacement_name: Optional[str] = None   # name of the superseding standard, if any
    replacement_number: Optional[str] = None # number of the superseding standard, if any
    final_result: Optional[str] = None       # final result description from the matcher

    def to_dict(self) -> Dict[str, Any]:
        """Return the result as a plain dict (dataclasses.asdict)."""
        return asdict(self)
+
class StandardTimelinessReviewer:
    """
    Standard timeliness reviewer.

    Uses the in-memory rule matching provided by StandardMatchingService to
    review cited standards for timeliness (superseded / abolished /
    mismatched / unknown), replacing the old vector-search + LLM approach.
    """

    def __init__(self, db_pool=None, standard_service: Optional[StandardMatchingService] = None):
        """
        Initialize the reviewer.

        Args:
            db_pool: database connection pool used to build a StandardMatchingService
                (required when standard_service is not supplied)
            standard_service: an already-initialized StandardMatchingService
                instance (takes precedence over db_pool)

        Raises:
            RuntimeError: when both db_pool and standard_service are missing.
        """
        if standard_service is None and not db_pool:
            # Mock mode has been removed: a real data source is mandatory.
            raise RuntimeError(
                "StandardTimelinessReviewer 初始化失败: 必须提供数据库连接池(db_pool)或已初始化的StandardMatchingService实例。\n"
                "Mock模式已取消,请确保数据库连接正常。"
            )
        self.db_pool = db_pool
        self._service = standard_service
        self._own_service = False  # True only when this instance created the service itself

    async def __aenter__(self):
        """Async context entry: build and initialize the service on demand."""
        if self._service is None:
            # own_db_pool=False: db_pool is injected, so the service must not close it.
            self._service = StandardMatchingService(self.db_pool, own_db_pool=False)
            await self._service.initialize()
            self._own_service = True
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context exit: close the service only if this instance created it."""
        if self._own_service and self._service:
            await self._service.close()
        return False

    def review_standards(self, standards: List[Dict[str, str]]) -> List[TimelinessReviewResult]:
        """
        Review a list of standards for timeliness.

        Args:
            standards: each item carries:
                - standard_name: standard name
                - standard_number: standard number

        Returns:
            One TimelinessReviewResult per input standard.

        Raises:
            RuntimeError: when the underlying service was never initialized.
        """
        if not self._service:
            raise RuntimeError("服务未初始化,请使用异步上下文管理器或调用 initialize()")
        match_results = self._service.check_standards(standards)
        # Map every match result onto the review-result shape.
        return [self._convert_match_to_review_result(m) for m in match_results]

    def review_single(self, standard_name: str, standard_number: str, seq_no: int = 1) -> TimelinessReviewResult:
        """
        Review a single standard for timeliness.

        Args:
            standard_name: standard name
            standard_number: standard number
            seq_no: sequence number carried through to the result

        Returns:
            TimelinessReviewResult for the given standard.

        Raises:
            RuntimeError: when the underlying service was never initialized.
        """
        if not self._service:
            raise RuntimeError("服务未初始化,请使用异步上下文管理器或调用 initialize()")
        match_result = self._service.check_single(seq_no, standard_name, standard_number)
        return self._convert_match_to_review_result(match_result)

    def _convert_match_to_review_result(self, match_result: StandardMatchResult) -> TimelinessReviewResult:
        """
        Convert one StandardMatchResult into a TimelinessReviewResult.

        The mapping is driven by the match status code; all outcomes share the
        same identifying fields, so those are built once instead of being
        repeated per branch (the previous version duplicated them six times).
        """
        status_code = match_result.status_code
        # Fields common to every outcome.
        base: Dict[str, Any] = {
            "seq_no": match_result.seq_no,
            "standard_name": match_result.original_name,
            "standard_number": match_result.original_number,
            "process_result": match_result.process_result,
            "status_code": status_code,
            "final_result": match_result.final_result,
        }

        if status_code in (MatchResultCode.OK.value, MatchResultCode.NOT_FOUND.value):
            # Current standard, or not present in the standard library:
            # neither is reported as an issue (NOT_FOUND is filtered downstream).
            return TimelinessReviewResult(has_issue=False, risk_level="low", **base)

        if status_code == MatchResultCode.SUBSTITUTED.value:
            # Superseded by a newer standard — high risk (same as legacy logic).
            return TimelinessReviewResult(
                has_issue=True,
                issue_type="标准被替代",
                suggestion=f"请更新为现行标准: {match_result.substitute_name}{match_result.substitute_number}",
                reason=match_result.final_result,
                risk_level="high",
                replacement_name=match_result.substitute_name,
                replacement_number=match_result.substitute_number,
                **base,
            )

        if status_code == MatchResultCode.ABOLISHED.value:
            # Abolished with no replacement — high risk (same as legacy logic).
            return TimelinessReviewResult(
                has_issue=True,
                issue_type="标准已废止",
                suggestion="该标准已废止且无现行替代,请检查是否仍需引用或寻找其他替代方案",
                reason=match_result.final_result,
                risk_level="high",
                **base,
            )

        if status_code == MatchResultCode.MISMATCH.value:
            # Name and number do not agree — treated as high risk like a wrong number.
            return TimelinessReviewResult(
                has_issue=True,
                issue_type="标准信息不匹配",
                suggestion=f"名称与标准号不匹配,实际应为: {match_result.substitute_name}{match_result.substitute_number}",
                reason=match_result.final_result,
                risk_level="high",
                replacement_name=match_result.substitute_name,
                replacement_number=match_result.substitute_number,
                **base,
            )

        # Unknown status code: keep legacy behavior — "medium" risk and the
        # process_result overwritten with the literal "未知".
        logger.warning(f"未知的匹配状态码: {status_code}")
        base["process_result"] = "未知"
        return TimelinessReviewResult(
            has_issue=True,
            issue_type="未知状态",
            reason=match_result.final_result,
            risk_level="medium",
            **base,
        )

    def convert_to_standardized_format(
        self,
        review_results: List[TimelinessReviewResult],
        check_item: str = "timeliness_check",
        chapter_code: str = "basis",
        check_item_code: str = "standard_timeliness_check"
    ) -> List[Dict[str, Any]]:
        """
        Convert review results into the legacy standardized review format.

        Entries that were not found in the standard library, or that carry no
        issue, are filtered out entirely.

        Args:
            review_results: results from review_standards()/review_single()
            check_item: check item name
            chapter_code: chapter code
            check_item_code: check item code

        Returns:
            List[Dict[str, Any]]: standardized review entries (issues only).
        """
        standardized_results: List[Dict[str, Any]] = []
        for result in review_results:
            # Filter: library-unknown standards and issue-free results.
            if result.status_code == MatchResultCode.NOT_FOUND.value or not result.has_issue:
                continue
            standardized_results.append({
                "check_item": check_item,
                "chapter_code": chapter_code,
                "check_item_code": check_item_code,
                "check_result": {
                    "location": f"《{result.standard_name}》({result.standard_number})",
                    "description": result.reason or result.final_result,
                    "suggestion": result.suggestion,
                    "issue_type": result.issue_type,
                    "standard_name": result.standard_name,
                    "standard_number": result.standard_number,
                    "replacement_name": result.replacement_name,
                    "replacement_number": result.replacement_number,
                },
                "exist_issue": True,
                "risk_info": {"risk_level": result.risk_level}
            })
        return standardized_results
+
+
+# ========== 便捷函数 ==========
+
async def review_standards_timeliness(
    standards_list: List[Dict[str, str]],
    db_pool=None,
    standard_service: Optional[StandardMatchingService] = None
) -> List[TimelinessReviewResult]:
    """
    Convenience wrapper: review a batch of standards for timeliness.

    Args:
        standards_list: items carrying "standard_name" and "standard_number"
        db_pool: database connection pool
        standard_service: pre-initialized StandardMatchingService
            (takes precedence over db_pool)

    Returns:
        List[TimelinessReviewResult]: one result per input standard.
    """
    reviewer = StandardTimelinessReviewer(db_pool=db_pool, standard_service=standard_service)
    async with reviewer:
        return reviewer.review_standards(standards_list)
+
+
async def review_standard_timeliness_with_standardized_output(
    standards_list: List[Dict[str, str]],
    db_pool=None,
    standard_service: Optional[StandardMatchingService] = None,
    check_item: str = "timeliness_check",
    chapter_code: str = "basis",
    check_item_code: str = "standard_timeliness_check"
) -> List[Dict[str, Any]]:
    """
    Convenience wrapper: review standards and emit the legacy standardized format.

    Args:
        standards_list: items carrying "standard_name" and "standard_number"
        db_pool: database connection pool
        standard_service: pre-initialized StandardMatchingService instance
        check_item: check item name
        chapter_code: chapter code
        check_item_code: check item code

    Returns:
        List[Dict[str, Any]]: standardized review entries (issues only).
    """
    reviewer = StandardTimelinessReviewer(db_pool=db_pool, standard_service=standard_service)
    async with reviewer:
        raw_results = reviewer.review_standards(standards_list)
        return reviewer.convert_to_standardized_format(
            raw_results,
            check_item=check_item,
            chapter_code=chapter_code,
            check_item_code=check_item_code,
        )

+ 269 - 197
core/construction_review/component/reviewers/timeliness_basis_reviewer.py

@@ -3,17 +3,26 @@ from __future__ import annotations
 import json
 import json
 import time
 import time
 import asyncio
 import asyncio
-from typing import Any, Dict, List
+import re
+from typing import Any, Dict, List, Optional, Tuple
 from functools import partial
 from functools import partial
 
 
-from langchain_milvus import Milvus, BM25BuiltInFunction
-from foundation.infrastructure.config.config import config_handler
-from foundation.ai.models.model_handler import model_handler as mh
+# [已注释] 旧的向量搜索和LLM判断相关导入
+# from langchain_milvus import Milvus, BM25BuiltInFunction
+# from foundation.infrastructure.config.config import config_handler
+# from foundation.ai.models.model_handler import model_handler as mh
 from core.construction_review.component.reviewers.utils.inter_tool import InterTool
 from core.construction_review.component.reviewers.utils.inter_tool import InterTool
 from core.construction_review.component.reviewers.utils.directory_extraction import BasisItems, BasisItem
 from core.construction_review.component.reviewers.utils.directory_extraction import BasisItems, BasisItem
 from foundation.observability.logger.loggering import review_logger as logger
 from foundation.observability.logger.loggering import review_logger as logger
-from core.construction_review.component.reviewers.utils.reference_matcher import match_reference_files
-from core.construction_review.component.reviewers.utils.timeliness_determiner import determine_timeliness_issue
+# [已注释] 旧的匹配和判定逻辑
+# from core.construction_review.component.reviewers.utils.reference_matcher import match_reference_files
+# from core.construction_review.component.reviewers.utils.timeliness_determiner import determine_timeliness_issue
+
+# [新增] 新的标准时效性审查模块
+from core.construction_review.component.reviewers.standard_timeliness_reviewer import (
+    StandardTimelinessReviewer,
+    review_standard_timeliness_with_standardized_output,
+)
 
 
 class StandardizedResponseProcessor:
 class StandardizedResponseProcessor:
     """标准化响应处理器"""
     """标准化响应处理器"""
@@ -26,7 +35,7 @@ class StandardizedResponseProcessor:
         处理LLM响应,返回标准格式
         处理LLM响应,返回标准格式
 
 
         Args:
         Args:
-            response_text: LLM原始响应文本(JSON字符串)
+            response_text: LLM原始响应文本(JSON字符串)
             check_name: 检查项名称
             check_name: 检查项名称
             chapter_code: 章节代码
             chapter_code: 章节代码
             check_item_code: 检查项代码
             check_item_code: 检查项代码
@@ -64,143 +73,246 @@ class StandardizedResponseProcessor:
             }]
             }]
 
 
 
 
-class BasisSearchEngine:
-    """编制依据向量搜索引擎"""
-
-    # 类级别的缓存,避免重复创建 Milvus 实例
-    _vectorstore_cache = {}
-
-    def __init__(self):
-        self.emdmodel = None
-        self.host = None
-        self.port = None
-        self.user = None
-        self.password = None
-        self._initialize()
-
-    def _initialize(self):
-        """初始化搜索引擎"""
-        try:
-            # 连接配置
-            self.host = config_handler.get('milvus', 'MILVUS_HOST', 'localhost')
-            self.port = int(config_handler.get('milvus', 'MILVUS_PORT', '19530'))
-            self.user = config_handler.get('milvus', 'MILVUS_USER')
-            self.password = config_handler.get('milvus', 'MILVUS_PASSWORD')
-
-            # 初始化嵌入模型
-            self.emdmodel = mh._get_lq_qwen3_8b_emd()
-            logger.info("嵌入模型初始化成功")
-
-        except Exception as e:
-            logger.error(f" BasisSearchEngine 初始化失败: {e}")
-
-    def _get_vectorstore(self, collection_name: str):
-        """获取或创建 Milvus vectorstore 实例(使用缓存)"""
-        cache_key = f"{self.host}:{self.port}:{collection_name}"
-
-        if cache_key not in BasisSearchEngine._vectorstore_cache:
-            connection_args = {
-                "uri": f"http://{self.host}:{self.port}",
-                "user": self.user,
-                "db_name": "lq_db"
-            }
-            if self.password:
-                connection_args["password"] = self.password
-
-            # 抑制 AsyncMilvusClient 的警告日志
-            import logging
-            original_level = logging.getLogger('pymilvus').level
-            logging.getLogger('pymilvus').setLevel(logging.ERROR)
-
-            try:
-                vectorstore = Milvus(
-                    embedding_function=self.emdmodel,
-                    collection_name=collection_name,
-                    connection_args=connection_args,
-                    consistency_level="Strong",
-                    builtin_function=BM25BuiltInFunction(),
-                    vector_field=["dense", "sparse"]
-                )
-                BasisSearchEngine._vectorstore_cache[cache_key] = vectorstore
-                logger.info(f"创建并缓存 Milvus 连接: {cache_key}")
-            finally:
-                logging.getLogger('pymilvus').setLevel(original_level)
-
-        return BasisSearchEngine._vectorstore_cache[cache_key]
-
-    def hybrid_search(self, collection_name: str, query_text: str,
-                     top_k: int = 3, ranker_type: str = "weighted",
-                     dense_weight: float = 0.7, sparse_weight: float = 0.3):
-        try:
-            # 使用缓存的 vectorstore
-            vectorstore = self._get_vectorstore(collection_name)
-
-            # 执行混合搜索
-            if ranker_type == "weighted":
-                results = vectorstore.similarity_search(
-                    query=query_text,
-                    k=top_k,
-                    ranker_type="weighted",
-                    ranker_params={"weights": [dense_weight, sparse_weight]}
-                )
-            else:  # rrf
-                results = vectorstore.similarity_search(
-                    query=query_text,
-                    k=top_k,
-                    ranker_type="rrf",
-                    ranker_params={"k": 60}
-                )
-
-            # 格式化结果,保持与其他搜索方法一致
-            formatted_results = []
-            for doc in results:
-                formatted_results.append({
-                    'id': doc.metadata.get('pk', 0),
-                    'text_content': doc.page_content,
-                    'metadata': doc.metadata,
-                    'distance': 0.0,
-                    'similarity': 1.0
-                })
-
-            return formatted_results
-
-        except Exception as e:
-            # 回退到传统的向量搜索
-            logger.error(f" 搜索失败: {e}")
+# [已注释] 旧的向量搜索引擎类,已被新的规则匹配替代
+# class BasisSearchEngine:
+#     """编制依据向量搜索引擎"""
+#
+#     # 类级别的缓存,避免重复创建 Milvus 实例
+#     _vectorstore_cache = {}
+#
+#     def __init__(self):
+#         self.emdmodel = None
+#         self.host = None
+#         self.port = None
+#         self.user = None
+#         self.password = None
+#         self._initialize()
+#
+#     def _initialize(self):
+#         """初始化搜索引擎"""
+#         try:
+#             # 连接配置
+#             self.host = config_handler.get('milvus', 'MILVUS_HOST', 'localhost')
+#             self.port = int(config_handler.get('milvus', 'MILVUS_PORT', '19530'))
+#             self.user = config_handler.get('milvus', 'MILVUS_USER')
+#             self.password = config_handler.get('milvus', 'MILVUS_PASSWORD')
+#
+#             # 初始化嵌入模型
+#             self.emdmodel = mh._get_lq_qwen3_8b_emd()
+#             logger.info("嵌入模型初始化成功")
+#
+#         except Exception as e:
+#             logger.error(f" BasisSearchEngine 初始化失败: {e}")
+#
+#     def _get_vectorstore(self, collection_name: str):
+#         """获取或创建 Milvus vectorstore 实例(使用缓存)"""
+#         cache_key = f"{self.host}:{self.port}:{collection_name}"
+#
+#         if cache_key not in BasisSearchEngine._vectorstore_cache:
+#             connection_args = {
+#                 "uri": f"http://{self.host}:{self.port}",
+#                 "user": self.user,
+#                 "db_name": "lq_db"
+#             }
+#             if self.password:
+#                 connection_args["password"] = self.password
+#
+#             # 抑制 AsyncMilvusClient 的警告日志
+#             import logging
+#             original_level = logging.getLogger('pymilvus').level
+#             logging.getLogger('pymilvus').setLevel(logging.ERROR)
+#
+#             try:
+#                 vectorstore = Milvus(
+#                     embedding_function=self.emdmodel,
+#                     collection_name=collection_name,
+#                     connection_args=connection_args,
+#                     consistency_level="Strong",
+#                     builtin_function=BM25BuiltInFunction(),
+#                     vector_field=["dense", "sparse"]
+#                 )
+#                 BasisSearchEngine._vectorstore_cache[cache_key] = vectorstore
+#                 logger.info(f"创建并缓存 Milvus 连接: {cache_key}")
+#             finally:
+#                 logging.getLogger('pymilvus').setLevel(original_level)
+#
+#         return BasisSearchEngine._vectorstore_cache[cache_key]
+#
+#     def hybrid_search(self, collection_name: str, query_text: str,
+#                      top_k: int = 3, ranker_type: str = "weighted",
+#                      dense_weight: float = 0.7, sparse_weight: float = 0.3):
+#         try:
+#             # 使用缓存的 vectorstore
+#             vectorstore = self._get_vectorstore(collection_name)
+#
+#             # 执行混合搜索
+#             if ranker_type == "weighted":
+#                 results = vectorstore.similarity_search(
+#                     query=query_text,
+#                     k=top_k,
+#                     ranker_type="weighted",
+#                     ranker_params={"weights": [dense_weight, sparse_weight]}
+#                 )
+#             else:  # rrf
+#                 results = vectorstore.similarity_search(
+#                     query=query_text,
+#                     k=top_k,
+#                     ranker_type="rrf",
+#                     ranker_params={"k": 60}
+#                 )
+#
+#             # 格式化结果,保持与其他搜索方法一致
+#             formatted_results = []
+#             for doc in results:
+#                 formatted_results.append({
+#                     'id': doc.metadata.get('pk', 0),
+#                     'text_content': doc.page_content,
+#                     'metadata': doc.metadata,
+#                     'distance': 0.0,
+#                     'similarity': 1.0
+#                 })
+#
+#             return formatted_results
+#
+#         except Exception as e:
+#             # 回退到传统的向量搜索
+#             logger.error(f" 搜索失败: {e}")
 
 
 
 
 class BasisReviewService:
 class BasisReviewService:
     """编制依据审查服务核心类"""
     """编制依据审查服务核心类"""
 
 
    def __init__(self, max_concurrent: int = 4, db_pool=None):
        """Create the basis review service.

        Args:
            max_concurrent: maximum number of concurrently reviewed batches.
            db_pool: database connection pool forwarded to StandardTimelinessReviewer.
        """
        # [已注释] legacy vector-search engine, replaced by rule-based matching
        # self.search_engine = BasisSearchEngine()
        # self.response_processor = StandardizedResponseProcessor()
        self.max_concurrent = max_concurrent
        self._semaphore = None            # created lazily in __aenter__
        self.db_pool = db_pool
        self._timeliness_reviewer = None  # created lazily in __aenter__
 
 
    async def __aenter__(self):
        """Async context entry: set up the semaphore and the timeliness reviewer."""
        if self._semaphore is None:
            self._semaphore = asyncio.Semaphore(self.max_concurrent)
        # [新增] lazily build the rule-based timeliness reviewer on first entry
        if self._timeliness_reviewer is None:
            self._timeliness_reviewer = StandardTimelinessReviewer(db_pool=self.db_pool)
            # 预初始化数据(如果还没初始化)
            # NOTE(review): this reaches into private attrs (_service/_initialized),
            # and StandardTimelinessReviewer.__aenter__ only creates a service when
            # _service is None — an existing-but-uninitialized service would not be
            # initialized by this call. Confirm against StandardMatchingService.
            if not self._timeliness_reviewer._service or not self._timeliness_reviewer._service._initialized:
                await self._timeliness_reviewer.__aenter__()
        return self
 
 
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context exit: close the timeliness reviewer; never suppresses exceptions."""
        # [新增] release the reviewer acquired in __aenter__
        if self._timeliness_reviewer:
            await self._timeliness_reviewer.__aexit__(exc_type, exc_val, exc_tb)
        return False
 
 
+    def _extract_standard_from_basis(self, basis_text: str) -> Optional[Dict[str, str]]:
+        """
+        [新增] 从编制依据文本中提取标准名称和编号
+
+        支持格式:
+        - 《标准名称》(标准号)
+        - 《标准名称》(标准号)其他文字
+        - 标准名称(标准号)
+        """
+        if not basis_text:
+            return None
+
+        # 模式1: 《名称》(编号)
+        pattern1 = r'《([^《》]+)》\s*(([^)]+))'
+        match = re.search(pattern1, basis_text)
+        if match:
+            return {
+                "standard_name": match.group(1).strip(),
+                "standard_number": match.group(2).strip()
+            }
+
+        # 模式2: 《名称》(编号) - 半角括号
+        pattern2 = r'《([^《》]+)》\s*\(([^)]+)\)'
+        match = re.search(pattern2, basis_text)
+        if match:
+            return {
+                "standard_name": match.group(1).strip(),
+                "standard_number": match.group(2).strip()
+            }
+
+        # 模式3: 尝试匹配标准号格式(如 GB 1234-2020)
+        standard_pattern = r'([A-Z]{2,6}(?:/[A-Z])?\s*\d{1,6}(?:\.\d)?(?:-\d{4})?)'
+        std_match = re.search(standard_pattern, basis_text.upper())
+        if std_match:
+            standard_number = std_match.group(1).strip()
+            # 尝试提取名称(在编号前的书名号内)
+            name_match = re.search(r'《([^《》]+)》', basis_text)
+            if name_match:
+                return {
+                    "standard_name": name_match.group(1).strip(),
+                    "standard_number": standard_number
+                }
+            # 如果没有书名号,使用空名称
+            return {
+                "standard_name": "",
+                "standard_number": standard_number
+            }
+
+        return None
+
     async def review_batch(
     async def review_batch(
         self,
         self,
         basis_items: List[str],
         basis_items: List[str],
-        collection_name: str = "first_bfp_collection_status",
-        top_k_each: int = 10,  # 增加召回数量,提高精确匹配机会
+        collection_name: str = "first_bfp_collection_status",  # [保留参数但不再使用]
+        top_k_each: int = 10,  # [保留参数但不再使用]
     ) -> List[Dict[str, Any]]:
     ) -> List[Dict[str, Any]]:
-        """异步批次审查(通常3条)"""
+        """
+        [已修改] 异步批次审查(通常3条)
+
+        新逻辑:使用基于内存的规则匹配替代向量搜索+LLM判断
+        """
         basis_items = [x for x in (basis_items or []) if isinstance(x, str) and x.strip()]
         basis_items = [x for x in (basis_items or []) if isinstance(x, str) and x.strip()]
         if not basis_items:
         if not basis_items:
             return []
             return []
 
 
         async with self._semaphore:
         async with self._semaphore:
             try:
             try:
+                # [新增] 从编制依据中提取标准信息
+                standards_list = []
+                for basis in basis_items:
+                    std_info = self._extract_standard_from_basis(basis)
+                    if std_info:
+                        standards_list.append(std_info)
+                        logger.debug(f"提取到标准: {std_info['standard_name']} ({std_info['standard_number']})")
+                    else:
+                        logger.warning(f"无法从编制依据提取标准信息: {basis}")
+
+                if not standards_list:
+                    logger.info(f"批次中未提取到有效标准信息,跳过审查")
+                    return []
+
+                # [新增] 使用新的时效性审查逻辑
+                if not self._timeliness_reviewer:
+                    raise RuntimeError("时效性审查器未初始化,请使用异步上下文管理器")
+
+                review_results = self._timeliness_reviewer.review_standards(standards_list)
+
+                # 转换为标准格式
+                standardized_results = self._timeliness_reviewer.convert_to_standardized_format(
+                    review_results,
+                    check_item="timeliness_check",
+                    chapter_code="basis",
+                    check_item_code="basis_timeliness_check"
+                )
+
+                # 统计结果
+                issue_count = sum(1 for item in standardized_results if item.get('exist_issue', False))
+                logger.info(f"编制依据批次审查完成:总计 {len(standards_list)} 项,发现问题 {issue_count} 项")
+
+                return standardized_results
+
+                # [已注释] 旧的向量搜索+LLM判断逻辑
+                """
                 # 并发搜索每个编制依据
                 # 并发搜索每个编制依据
                 search_tasks = []
                 search_tasks = []
                 for basis in basis_items:
                 for basis in basis_items:
@@ -218,77 +330,15 @@ class BasisReviewService:
                         logger.error(f"搜索失败 '{basis_items[i]}': {result}")
                         logger.error(f"搜索失败 '{basis_items[i]}': {result}")
                         grouped_candidates.append([])
                         grouped_candidates.append([])
                     else:
                     else:
-                        # result 是 List[dict],需要遍历
                         texts = [item["text_content"] for item in result if "text_content" in item]
                         texts = [item["text_content"] for item in result if "text_content" in item]
                         grouped_candidates.append(texts)
                         grouped_candidates.append(texts)
-                
-                # 获取match_reference_files的结果并过滤
-                match_result = await match_reference_files(reference_text=grouped_candidates, review_text=basis_items)
 
 
-                # 记录完整的匹配结果用于调试
-                logger.info(f"批次 match_reference_files 原始结果: {match_result[:500]}...")
-
-                # 解析JSON并过滤:保留有相关信息的项
-                try:
-                    match_data = json.loads(match_result)
-                    # 提取items字段(match_reference_files返回{items: [...]}格式)
-                    items = match_data.get('items', match_data) if isinstance(match_data, dict) else match_data
-
-                    logger.info(f"解析到 {len(items)} 个匹配项")
-                    for idx, item in enumerate(items):
-                        logger.info(f"  项{idx}: review_item={item.get('review_item', 'unknown')}, "
-                                  f"has_related_file={item.get('has_related_file')}, "
-                                  f"exact_match_info={item.get('exact_match_info')}, "
-                                  f"same_name_current={item.get('same_name_current')}")
-
-                    # 放宽过滤条件:只要有相关文件信息就进行审查
-                    filtered_data = [
-                        item for item in items
-                        if item.get('has_related_file') or
-                           item.get('exact_match_info') or
-                           item.get('same_name_current')
-                    ]
-
-                    logger.info(f"过滤后保留 {len(filtered_data)} 个项")
-
-                    # 记录被过滤掉的项目用于调试
-                    skipped_items = [
-                        item for item in items
-                        if not (item.get('has_related_file') or
-                               item.get('exact_match_info') or
-                               item.get('same_name_current'))
-                    ]
-                    if skipped_items:
-                        logger.warning(f"跳过了 {len(skipped_items)} 个无参考信息的编制依据: "
-                                     f"{[item.get('review_item', 'unknown') for item in skipped_items]}")
-
-                    # 如果没有过滤出数据,直接返回空结果
-                    if not filtered_data:
-                        logger.info(f"过滤后没有符合条件的编制依据,跳过后续检查")
-                        standardized_result = []
-                    else:
-                        # 重新构建JSON格式
-                        if isinstance(match_data, dict) and 'items' in match_data:
-                            match_result = json.dumps({"items": filtered_data}, ensure_ascii=False, indent=2)
-                        else:
-                            match_result = json.dumps(filtered_data, ensure_ascii=False, indent=2)
-                        
-                        llm_out = await determine_timeliness_issue(match_result)
-                        
-                        standardized_result = self.response_processor.process_llm_response(llm_out, "timeliness_check", "basis", "basis_timeliness_check")
-                        # 统计问题数量
-                        issue_count = sum(1 for item in standardized_result if item.get('exist_issue', False))
-                        logger.info(f"编制依据批次审查完成:总计 {len(filtered_data)} 项,发现问题 {issue_count} 项")
-                    
-                    return standardized_result if standardized_result else []
-                    
-                except (json.JSONDecodeError, TypeError) as e:
-                    logger.warning(f"过滤match_reference_files结果时出错: {e}")
-                    # 如果解析失败,返回空结果
-                    return []
+                match_result = await match_reference_files(reference_text=grouped_candidates, review_text=basis_items)
+                ...  # 其余旧逻辑已省略
+                """
 
 
             except Exception as e:
             except Exception as e:
-                logger.error(f" 批次处理失败: {e}")
+                logger.error(f"批次处理失败: {e}")
                 return [{
                 return [{
                     "check_item": "timeliness_check",
                     "check_item": "timeliness_check",
                     "chapter_code": "basis",
                     "chapter_code": "basis",
@@ -298,15 +348,15 @@ class BasisReviewService:
                     "risk_info": {"risk_level": "high"}
                     "risk_info": {"risk_level": "high"}
                 }]
                 }]
 
 
-    
-    
+    # [已注释] 旧的向量搜索方法,已被新的规则匹配替代
+    """
     async def _async_search_basis(
     async def _async_search_basis(
         self,
         self,
         basis: str,
         basis: str,
         collection_name: str,
         collection_name: str,
         top_k_each: int
         top_k_each: int
     ) -> List[dict]:
     ) -> List[dict]:
-        """异步搜索单个编制依据(Hybrid Search)"""
+        # 异步搜索单个编制依据(Hybrid Search)
         try:
         try:
             loop = asyncio.get_running_loop()
             loop = asyncio.get_running_loop()
             func = partial(
             func = partial(
@@ -324,11 +374,11 @@ class BasisReviewService:
         except Exception as e:
         except Exception as e:
             logger.error(f" 搜索失败 '{basis}': {e}")
             logger.error(f" 搜索失败 '{basis}': {e}")
             return []
             return []
+    """
 
 
-    
     async def review_all(self, basis_items: BasisItems, collection_name: str = "first_bfp_collection_status",
     async def review_all(self, basis_items: BasisItems, collection_name: str = "first_bfp_collection_status",
                         progress_manager=None, callback_task_id: str = None) -> List[List[Dict[str, Any]]]:
                         progress_manager=None, callback_task_id: str = None) -> List[List[Dict[str, Any]]]:
-        """异步批量审查所有编制依据(入参为 BasisItems)"""
+        """异步批量审查所有编制依据(入参为 BasisItems)"""
         if not basis_items or not getattr(basis_items, "items", None):
         if not basis_items or not getattr(basis_items, "items", None):
             return []
             return []
 
 
@@ -339,7 +389,7 @@ class BasisReviewService:
         start_time = time.time()
         start_time = time.time()
         total_batches = (len(items) + 2) // 3  # 计算总批次数
         total_batches = (len(items) + 2) // 3  # 计算总批次数
         
         
-        # 发送开始审查的SSE推送(使用独立命名空间,避免与主流程进度冲突)
+        # 发送开始审查的SSE推送(使用独立命名空间,避免与主流程进度冲突)
         if progress_manager and callback_task_id:
         if progress_manager and callback_task_id:
             try:
             try:
                 await progress_manager.update_stage_progress(
                 await progress_manager.update_stage_progress(
@@ -373,7 +423,7 @@ class BasisReviewService:
                     if isinstance(item, dict) and item.get('is_standard', False):
                     if isinstance(item, dict) and item.get('is_standard', False):
                         batch_standard_count += 1
                         batch_standard_count += 1
 
 
-                # 立即推送当前批次完成的SSE消息(使用独立命名空间)
+                # 立即推送当前批次完成的SSE消息(使用独立命名空间)
                 logger.info(f"批次{batch_index + 1}完成,准备推送SSE")
                 logger.info(f"批次{batch_index + 1}完成,准备推送SSE")
                 if progress_manager and callback_task_id:
                 if progress_manager and callback_task_id:
                     try:
                     try:
@@ -398,7 +448,7 @@ class BasisReviewService:
                 error_result = [{"name": name, "is_standard": False, "status": "", "meg": f"批次处理失败2: {str(e)}"}
                 error_result = [{"name": name, "is_standard": False, "status": "", "meg": f"批次处理失败2: {str(e)}"}
                                 for name in batch]
                                 for name in batch]
 
 
-                # 即使失败也要推送结果(使用独立命名空间)
+                # 即使失败也要推送结果(使用独立命名空间)
                 if progress_manager and callback_task_id:
                 if progress_manager and callback_task_id:
                     try:
                     try:
                         await progress_manager.update_stage_progress(
                         await progress_manager.update_stage_progress(
@@ -463,7 +513,7 @@ class BasisReviewService:
         logger.info(f"并发执行完成,成功批次: {successful_batches}/{total_batches}")
         logger.info(f"并发执行完成,成功批次: {successful_batches}/{total_batches}")
 
 
 
 
-        # 发送完成审查的SSE推送(使用独立命名空间,不设置current避免覆盖主流程进度)
+        # 发送完成审查的SSE推送(使用独立命名空间,不设置current避免覆盖主流程进度)
         elapsed_time = time.time() - start_time
         elapsed_time = time.time() - start_time
         if progress_manager and callback_task_id:
         if progress_manager and callback_task_id:
             try:
             try:
@@ -486,15 +536,37 @@ class BasisReviewService:
 
 
 
 
 # 便捷函数
 # 便捷函数
-async def review_basis_batch_async(basis_items: List[str], max_concurrent: int = 4) -> List[Dict[str, Any]]:
-    """异步批次审查便捷函数"""
-    async with BasisReviewService(max_concurrent=max_concurrent) as service:
+async def review_basis_batch_async(
+    basis_items: List[str],
+    max_concurrent: int = 4,
+    db_pool=None
+) -> List[Dict[str, Any]]:
+    """
+    [已修改] 异步批次审查便捷函数
+
+    Args:
+        basis_items: 编制依据列表
+        max_concurrent: 最大并发数
+        db_pool: 数据库连接池(用于新的规则匹配)
+    """
+    async with BasisReviewService(max_concurrent=max_concurrent, db_pool=db_pool) as service:
         return await service.review_batch(basis_items)
         return await service.review_batch(basis_items)
 
 
 
 
-async def review_all_basis_async(basis_items: BasisItems, max_concurrent: int = 4) -> List[List[Dict[str, Any]]]:
-    """异步全部审查便捷函数(BasisItems 入参)"""
-    async with BasisReviewService(max_concurrent=max_concurrent) as service:
+async def review_all_basis_async(
+    basis_items: BasisItems,
+    max_concurrent: int = 4,
+    db_pool=None
+) -> List[List[Dict[str, Any]]]:
+    """
+    [已修改] 异步全部审查便捷函数(BasisItems 入参)
+
+    Args:
+        basis_items: BasisItems 对象
+        max_concurrent: 最大并发数
+        db_pool: 数据库连接池(用于新的规则匹配)
+    """
+    async with BasisReviewService(max_concurrent=max_concurrent, db_pool=db_pool) as service:
         return await service.review_all(basis_items)
         return await service.review_all(basis_items)
 
 
 if __name__ == "__main__":
 if __name__ == "__main__":

+ 128 - 144
core/construction_review/component/reviewers/timeliness_content_reviewer.py

@@ -15,9 +15,15 @@ from dataclasses import dataclass, field
 from functools import partial
 from functools import partial
 
 
 from foundation.observability.logger.loggering import review_logger as logger
 from foundation.observability.logger.loggering import review_logger as logger
-from core.construction_review.component.reviewers.utils.reference_matcher import match_reference_files
-from core.construction_review.component.reviewers.utils.timeliness_determiner import determine_timeliness_issue
-from core.construction_review.component.reviewers.timeliness_basis_reviewer import BasisSearchEngine, StandardizedResponseProcessor
+# [已注释] 旧的向量搜索和LLM判断相关导入
+# from core.construction_review.component.reviewers.utils.reference_matcher import match_reference_files
+# from core.construction_review.component.reviewers.utils.timeliness_determiner import determine_timeliness_issue
+# from core.construction_review.component.reviewers.timeliness_basis_reviewer import BasisSearchEngine, StandardizedResponseProcessor
+
+# [新增] 新的标准时效性审查模块
+from core.construction_review.component.reviewers.standard_timeliness_reviewer import (
+    StandardTimelinessReviewer,
+)
 
 
 
 
 @dataclass
 @dataclass
@@ -32,13 +38,13 @@ class StandardReference:
 
 
 @dataclass
 @dataclass
 class ContentTimelinessResult:
 class ContentTimelinessResult:
-    """内容时效性审查结果"""
+    """内容时效性审查结果(保留用于兼容,新逻辑中不再直接使用)"""
     reference: StandardReference
     reference: StandardReference
     has_issue: bool
     has_issue: bool
     issue_type: str              # 问题类型
     issue_type: str              # 问题类型
     suggestion: str
     suggestion: str
     reason: str
     reason: str
-    risk_level: str              # 无风险 / 高风险
+    risk_level: str              # 风险等级(与原有逻辑一致:无风险/高风险)
 
 
 
 
 class StandardExtractor:
 class StandardExtractor:
@@ -169,21 +175,32 @@ class StandardExtractor:
 class ContentTimelinessReviewer:
 class ContentTimelinessReviewer:
     """三级分类内容时效性审查器"""
     """三级分类内容时效性审查器"""
 
 
-    def __init__(self, max_concurrent: int = 4):
+    def __init__(self, max_concurrent: int = 4, db_pool=None):
         self.extractor = StandardExtractor()
         self.extractor = StandardExtractor()
-        self.search_engine = BasisSearchEngine()
-        self.response_processor = StandardizedResponseProcessor()
+        # [已注释] 旧的向量搜索引擎
+        # self.search_engine = BasisSearchEngine()
+        # self.response_processor = StandardizedResponseProcessor()
         self.max_concurrent = max_concurrent
         self.max_concurrent = max_concurrent
         self._semaphore = None
         self._semaphore = None
+        self.db_pool = db_pool
+        self._timeliness_reviewer = None
 
 
     async def __aenter__(self):
     async def __aenter__(self):
         """异步上下文管理器入口"""
         """异步上下文管理器入口"""
         if self._semaphore is None:
         if self._semaphore is None:
             self._semaphore = asyncio.Semaphore(self.max_concurrent)
             self._semaphore = asyncio.Semaphore(self.max_concurrent)
+        # [新增] 初始化新的时效性审查器
+        if self._timeliness_reviewer is None:
+            self._timeliness_reviewer = StandardTimelinessReviewer(db_pool=self.db_pool)
+            if not self._timeliness_reviewer._service or not self._timeliness_reviewer._service._initialized:
+                await self._timeliness_reviewer.__aenter__()
         return self
         return self
 
 
     async def __aexit__(self, exc_type, exc_val, exc_tb):
     async def __aexit__(self, exc_type, exc_val, exc_tb):
         """异步上下文管理器出口"""
         """异步上下文管理器出口"""
+        # [新增] 关闭时效性审查器
+        if self._timeliness_reviewer:
+            await self._timeliness_reviewer.__aexit__(exc_type, exc_val, exc_tb)
         return False
         return False
 
 
     async def review_tertiary_content(
     async def review_tertiary_content(
@@ -241,6 +258,94 @@ class ContentTimelinessReviewer:
         # 2. 对提取的规范进行时效性审查
         # 2. 对提取的规范进行时效性审查
         all_issues = []
         all_issues = []
 
 
+        # [新增] 构建标准列表用于规则匹配
+        standards_list = []
+        for ref in all_references:
+            standards_list.append({
+                "standard_name": ref.name,
+                "standard_number": ref.number
+            })
+
+        if not standards_list:
+            logger.info("未提取到有效标准信息")
+            return []
+
+        # [新增] 使用新的时效性审查逻辑
+        if not self._timeliness_reviewer:
+            raise RuntimeError("时效性审查器未初始化,请使用异步上下文管理器")
+
+        try:
+            async with self._semaphore:
+                # 执行规则匹配审查
+                review_results = self._timeliness_reviewer.review_standards(standards_list)
+
+                # 转换为标准格式
+                standardized_results = self._timeliness_reviewer.convert_to_standardized_format(
+                    review_results,
+                    check_item="content_timeliness_check",
+                    chapter_code="content",
+                    check_item_code="content_timeliness_check"
+                )
+
+                # 增强结果:添加位置信息
+                for item in standardized_results:
+                    # 构建原始引用文本(《名称》(编号))
+                    std_name = item.get("check_result", {}).get("standard_name", "")
+                    std_number = item.get("check_result", {}).get("standard_number", "")
+                    review_item_text = f"《{std_name}》({std_number})"
+
+                    if review_item_text in reference_to_location:
+                        locations = reference_to_location[review_item_text]
+                        # 添加位置信息到结果
+                        item["location_info"] = locations
+                        # 添加三级分类上下文
+                        contexts = []
+                        for loc in locations:
+                            ctx = f"[{loc.get('third_category_name', '')}] 第{loc.get('start_line', 0)}-{loc.get('end_line', 0)}行"
+                            contexts.append(ctx)
+                        item["content_context"] = "; ".join(contexts)
+
+                        # 更新location字段为更详细的描述
+                        if contexts:
+                            item["check_result"]["location"] = f"{review_item_text}(出现在:{item['content_context']})"
+
+                all_issues.extend(standardized_results)
+
+                # 统计结果
+                issue_count = sum(1 for item in standardized_results if item.get("exist_issue", False))
+                logger.info(f"内容时效性审查完成:总计 {len(standards_list)} 项引用,发现问题 {issue_count} 项")
+
+                # SSE推送(如果提供了progress_manager)
+                if progress_manager and callback_task_id:
+                    try:
+                        await progress_manager.update_stage_progress(
+                            callback_task_id=callback_task_id,
+                            stage_name="内容时效性审查",
+                            status="processing",
+                            message=f"完成内容时效性审查,{len(standards_list)}项,发现问题{issue_count}项",
+                            overall_task_status="processing",
+                            event_type="processing",
+                            issues=standardized_results
+                        )
+                    except Exception as e:
+                        logger.error(f"SSE推送失败: {e}")
+
+        except Exception as e:
+            logger.error(f"时效性审查处理失败: {e}")
+            error_result = {
+                "check_item": "content_timeliness_check",
+                "chapter_code": "content",
+                "check_item_code": "content_timeliness_check",
+                "check_result": {"error": str(e)},
+                "exist_issue": True,
+                "risk_info": {"risk_level": "medium"}
+            }
+            all_issues.append(error_result)
+
+        return all_issues
+
+        # [已注释] 旧的向量搜索+LLM判断逻辑
+        """
         # 分批处理(每批3个)
         # 分批处理(每批3个)
         batch_size = 3
         batch_size = 3
         ref_texts = [ref.original_text for ref in all_references]
         ref_texts = [ref.original_text for ref in all_references]
@@ -262,145 +367,21 @@ class ContentTimelinessReviewer:
                         search_tasks.append(task)
                         search_tasks.append(task)
 
 
                     search_results = await asyncio.gather(*search_tasks, return_exceptions=True)
                     search_results = await asyncio.gather(*search_tasks, return_exceptions=True)
-
-                    # 构建参考文本列表
-                    grouped_candidates = []
-                    for j, result in enumerate(search_results):
-                        if isinstance(result, Exception):
-                            logger.error(f"搜索失败 '{batch_refs[j].original_text}': {result}")
-                            grouped_candidates.append([])
-                        else:
-                            texts = [item.get("text_content", "") for item in result if item]
-                            grouped_candidates.append(texts)
-
-                    # 匹配参考文件
-                    match_result = await match_reference_files(
-                        reference_text=grouped_candidates,
-                        review_text=batch_texts
-                    )
-
-                    # 记录完整的匹配结果用于调试
-                    logger.info(f"批次{batch_num} match_reference_files 原始结果: {match_result[:500]}...")
-
-                    # 过滤:保留有相关信息的项进行审查
-                    # 条件:has_related_file为true 或 exact_match_info不为空 或 same_name_current不为空
-                    try:
-                        match_data = json.loads(match_result)
-                        items = match_data.get('items', match_data) if isinstance(match_data, dict) else match_data
-
-                        logger.info(f"批次{batch_num} 解析到 {len(items)} 个匹配项")
-                        for idx, item in enumerate(items):
-                            logger.info(f"  项{idx}: review_item={item.get('review_item', 'unknown')}, "
-                                      f"has_related_file={item.get('has_related_file')}, "
-                                      f"exact_match_info={item.get('exact_match_info')}, "
-                                      f"same_name_current={item.get('same_name_current')}")
-
-                        # 放宽过滤条件:只要有相关文件信息就进行审查
-                        filtered_data = [
-                            item for item in items
-                            if item.get('has_related_file') or
-                               item.get('exact_match_info') or
-                               item.get('same_name_current')
-                        ]
-
-                        logger.info(f"批次{batch_num} 过滤后保留 {len(filtered_data)} 个项")
-
-                        # 记录被过滤掉的项目用于调试
-                        skipped_items = [
-                            item for item in items
-                            if not (item.get('has_related_file') or
-                                   item.get('exact_match_info') or
-                                   item.get('same_name_current'))
-                        ]
-                        if skipped_items:
-                            logger.warning(f"批次{batch_num} 跳过了 {len(skipped_items)} 个无参考信息的项: "
-                                         f"{[item.get('review_item', 'unknown') for item in skipped_items]}")
-
-                        if not filtered_data:
-                            logger.info(f"批次{batch_num}: 没有符合审查条件的规范引用")
-                            continue
-
-                        # 重新构建JSON
-                        if isinstance(match_data, dict) and 'items' in match_data:
-                            match_result = json.dumps({"items": filtered_data}, ensure_ascii=False)
-                        else:
-                            match_result = json.dumps(filtered_data, ensure_ascii=False)
-
-                        # 判定时效性问题
-                        llm_out = await determine_timeliness_issue(match_result)
-
-                        # 处理响应
-                        standardized_result = self.response_processor.process_llm_response(
-                            llm_out,
-                            "content_timeliness_check",
-                            "content",
-                            "content_timeliness_check"
-                        )
-
-                        # 3. 增强结果:添加位置信息
-                        for item in standardized_result:
-                            review_item = item.get("check_result", {}).get("location", "")
-                            if review_item in reference_to_location:
-                                locations = reference_to_location[review_item]
-                                # 添加位置信息到结果
-                                item["location_info"] = locations
-                                # 添加三级分类上下文
-                                contexts = []
-                                for loc in locations:
-                                    ctx = f"[{loc.get('third_category_name', '')}] 第{loc.get('start_line', 0)}-{loc.get('end_line', 0)}行"
-                                    contexts.append(ctx)
-                                item["content_context"] = "; ".join(contexts)
-
-                                # 更新location字段为更详细的描述
-                                if contexts:
-                                    item["check_result"]["location"] = f"{review_item}(出现在:{item['content_context']})"
-
-                        all_issues.extend(standardized_result)
-
-                        # SSE推送(如果提供了progress_manager)
-                        if progress_manager and callback_task_id:
-                            try:
-                                await progress_manager.update_stage_progress(
-                                    callback_task_id=callback_task_id,
-                                    stage_name=f"内容时效性审查-批次{batch_num}",
-                                    status="processing",
-                                    message=f"完成第{batch_num}/{total_batches}批次内容时效性审查,{len(batch_refs)}项",
-                                    overall_task_status="processing",
-                                    event_type="processing",
-                                    issues=standardized_result
-                                )
-                            except Exception as e:
-                                logger.error(f"SSE推送失败: {e}")
-
-                    except (json.JSONDecodeError, TypeError) as e:
-                        logger.warning(f"处理匹配结果时出错: {e}")
-                        continue
-
+                    ...  # 其余旧逻辑已省略
             except Exception as e:
             except Exception as e:
                 logger.error(f"批次 {batch_num} 处理失败: {e}")
                 logger.error(f"批次 {batch_num} 处理失败: {e}")
-                error_result = {
-                    "check_item": "content_timeliness_check",
-                    "chapter_code": "content",
-                    "check_item_code": "content_timeliness_check",
-                    "check_result": {"error": str(e), "batch_num": batch_num},
-                    "exist_issue": True,
-                    "risk_info": {"risk_level": "medium"}
-                }
-                all_issues.append(error_result)
-
-        # 统计结果
-        issue_count = sum(1 for item in all_issues if item.get("exist_issue", False))
-        logger.info(f"内容时效性审查完成:总计 {len(all_references)} 项引用,发现问题 {issue_count} 项")
-
-        return all_issues
+        ...
+        """
 
 
+    # [已注释] 旧的向量搜索方法,已被新的规则匹配替代
+    """
     async def _async_search_standard(
     async def _async_search_standard(
         self,
         self,
         standard_number: str,
         standard_number: str,
         collection_name: str,
         collection_name: str,
-        top_k: int = 10  # 增加召回数量,提高精确匹配机会
+        top_k: int = 10
     ) -> List[dict]:
     ) -> List[dict]:
-        """异步搜索单个规范"""
+        '''异步搜索单个规范'''
         try:
         try:
             loop = asyncio.get_running_loop()
             loop = asyncio.get_running_loop()
             func = partial(
             func = partial(
@@ -418,31 +399,34 @@ class ContentTimelinessReviewer:
         except Exception as e:
         except Exception as e:
             logger.error(f"搜索失败 '{standard_number}': {e}")
             logger.error(f"搜索失败 '{standard_number}': {e}")
             return []
             return []
+    """
 
 
 
 
 # ===== 便捷函数 =====
 # ===== 便捷函数 =====
 
 
 async def review_tertiary_content_timeliness(
 async def review_tertiary_content_timeliness(
     tertiary_details: List[Dict[str, Any]],
     tertiary_details: List[Dict[str, Any]],
-    collection_name: str = "first_bfp_collection_status",
+    collection_name: str = "first_bfp_collection_status",  # [保留参数但不再使用]
     max_concurrent: int = 4,
     max_concurrent: int = 4,
     progress_manager=None,
     progress_manager=None,
-    callback_task_id: str = None
+    callback_task_id: str = None,
+    db_pool=None  # [新增] 数据库连接池
 ) -> List[Dict[str, Any]]:
 ) -> List[Dict[str, Any]]:
     """
     """
-    审查三级分类内容时效性的便捷函数
+    [已修改] 审查三级分类内容时效性的便捷函数
 
 
     Args:
     Args:
         tertiary_details: 三级分类详情列表
         tertiary_details: 三级分类详情列表
-        collection_name: Milvus集合名称
+        collection_name: Milvus集合名称(已废弃,保留参数用于兼容)
         max_concurrent: 最大并发数
         max_concurrent: 最大并发数
         progress_manager: 进度管理器(可选)
         progress_manager: 进度管理器(可选)
         callback_task_id: 回调任务ID(可选)
         callback_task_id: 回调任务ID(可选)
+        db_pool: 数据库连接池(用于新的规则匹配)
 
 
     Returns:
     Returns:
         List[Dict]: 标准化的审查结果列表
         List[Dict]: 标准化的审查结果列表
     """
     """
-    async with ContentTimelinessReviewer(max_concurrent=max_concurrent) as reviewer:
+    async with ContentTimelinessReviewer(max_concurrent=max_concurrent, db_pool=db_pool) as reviewer:
         return await reviewer.review_tertiary_content(
         return await reviewer.review_tertiary_content(
             tertiary_details=tertiary_details,
             tertiary_details=tertiary_details,
             collection_name=collection_name,
             collection_name=collection_name,

+ 181 - 0
core/construction_review/component/standard_matching/README.md

@@ -0,0 +1,181 @@
+# 标准库匹配模块 - 时效性审查
+
+## 简介
+
+本模块提供基于内存匹配规则的标准时效性审查功能,替代原有的向量搜索+LLM判断方式,具有以下优势:
+
+- **高性能**:数据加载到内存后,查询无需访问数据库
+- **准确性**:基于规则的精确匹配,不受LLM幻觉影响
+- **无LLM依赖**:纯规则匹配,无需调用大模型
+- **易于维护**:清晰的匹配规则逻辑,便于调试和优化
+
+## 模块结构
+
+```
+standard_matching/
+├── __init__.py              # 模块导出
+├── standard_dao.py          # 数据访问对象(从MySQL加载数据)
+├── standard_service.py      # 核心业务逻辑(内存匹配)
+└── README.md               # 使用说明
+```
+
+## 核心组件
+
+### 1. StandardMatchingService
+
+标准匹配服务,对外暴露的统一接口。
+
+**主要方法:**
+- `initialize()`: 从数据库加载数据到内存(只需调用一次)
+- `check_standards(standards)`: 批量检查标准列表
+- `check_single(seq_no, name, number)`: 检查单个标准
+
+### 2. StandardTimelinessReviewer
+
+时效性审查器,位于 `reviewers/standard_timeliness_reviewer.py`,提供更高级的审查功能。
+
+**主要方法:**
+- `review_standards(standards)`: 审查标准列表,返回详细审查结果
+- `review_single(name, number, seq_no)`: 审查单个标准
+- `convert_to_standardized_format(results)`: 转换为标准格式(兼容原有审查系统)
+
+## 使用示例
+
+### 方式1:使用便捷函数(推荐)
+
+```python
+import asyncio
+from foundation.infrastructure.mysql.async_mysql_conn_pool import AsyncMySQLPool
+from core.construction_review.component.reviewers import review_standards_timeliness
+
+async def main():
+    # 初始化数据库连接池
+    db_pool = AsyncMySQLPool()
+    await db_pool.initialize()
+
+    # 定义要检查的标准列表
+    standards = [
+        {"standard_name": "铁路桥涵设计规范", "standard_number": "TB 10002-2017"},
+        {"standard_name": "起重机 钢丝绳 保养、维护、检验和报废", "standard_number": "GB/T 5972-2016"},
+    ]
+
+    # 执行时效性审查
+    results = await review_standards_timeliness(standards, db_pool=db_pool)
+
+    # 处理结果
+    for result in results:
+        print(f"{result.standard_name}: {result.process_result}")
+        if result.has_issue:
+            print(f"  问题: {result.issue_type}")
+            print(f"  建议: {result.suggestion}")
+
+    await db_pool.close()
+
+asyncio.run(main())
+```
+
+### 方式2:使用异步上下文管理器
+
+```python
+from core.construction_review.component.reviewers import StandardTimelinessReviewer
+
+async def main():
+    db_pool = AsyncMySQLPool()
+    await db_pool.initialize()
+
+    async with StandardTimelinessReviewer(db_pool=db_pool) as reviewer:
+        # 审查标准
+        results = reviewer.review_standards(standards_list)
+
+        # 转换为标准格式(兼容原有系统)
+        standardized = reviewer.convert_to_standardized_format(
+            results,
+            check_item="timeliness_check",
+            chapter_code="basis",
+            check_item_code="basis_timeliness_check"
+        )
+
+    await db_pool.close()
+```
+
+### 方式3:直接使用 StandardMatchingService
+
+```python
+from core.construction_review.component.standard_matching import StandardMatchingService
+
+async def main():
+    # 创建服务并初始化
+    service = StandardMatchingService(db_pool=db_pool)
+    await service.initialize()
+
+    # 批量检查
+    results = service.check_standards([
+        {"standard_name": "铁路桥涵设计规范", "standard_number": "TB 10002-2017"},
+    ])
+
+    for result in results:
+        print(f"状态: {result.status_code}")
+        print(f"结果: {result.final_result}")
+```
+
+## 匹配结果状态码
+
+| 状态码 | 说明 | 风险等级 |
+|--------|------|----------|
+| OK | 标准现行有效 | none |
+| SUBSTITUTED | 标准被替代 | high |
+| ABOLISHED | 标准废止无替代 | high |
+| MISMATCH | 名称与标准号不匹配 | medium |
+| NOT_FOUND | 标准库不存在 | medium |
+
+## 匹配规则流程
+
+1. **标准号精确匹配**
+   - 匹配成功 -> 检查名称是否匹配 -> 检查时效性状态
+   - 匹配失败 -> 尝试模糊匹配标准号
+
+2. **名称匹配**
+   - 精确匹配成功 -> 检查时效性状态
+   - 模糊匹配成功 -> 返回不匹配(标准号错误)
+   - 匹配失败 -> 返回不存在
+
+3. **时效性状态处理**
+   - 现行/试行 -> 正常
+   - 废止 -> 查找同名现行标准(被替代)
+   - 废止无替代 -> 废止无现行
+
+## 性能考虑
+
+- 数据加载:应用启动时一次性从MySQL加载,约1-2秒(1000+条标准)
+- 内存占用:约5-10MB(取决于标准数据量)
+- 查询速度:内存操作,单次匹配 < 1ms
+
+## 集成到现有系统
+
+新的时效性审查逻辑可以集成到以下模块:
+
+1. **timeliness_basis_reviewer.py**: 编制依据时效性审查
+2. **timeliness_content_reviewer.py**: 三级分类内容时效性审查
+
+集成方式:将原有的向量搜索+LLM判断逻辑替换为新的规则匹配逻辑。
+
+示例:
+```python
+# 原有方式(向量搜索+LLM)
+search_results = await self._async_search_basis(basis, collection_name)
+match_result = await match_reference_files(reference_text=search_results, review_text=basis)
+llm_out = await determine_timeliness_issue(match_result)
+
+# 新方式(规则匹配)
+from core.construction_review.component.reviewers import review_standard_timeliness_with_standardized_output
+results = await review_standard_timeliness_with_standardized_output(
+    standards_list,
+    db_pool=db_pool
+)
+```
+
+## 注意事项
+
+1. **数据库连接池**:使用时需要传入已初始化的 AsyncMySQLPool 实例
+2. **单例模式**:StandardTimelinessReviewer 支持单例模式,可通过 `get_standard_matching_service()` 获取全局实例
+3. **数据更新**:如果标准库数据发生变化,需要重新初始化服务以加载最新数据

+ 34 - 0
core/construction_review/component/standard_matching/__init__.py

@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Standards-library matching module - core component of the timeliness review.
+
+Provides in-memory standards lookup and matching, replacing the previous
+vector-search + LLM-judgement pipeline.
+
+Main components:
+- StandardMatchingService: public service facade
+- StandardMatcher: matching-rule logic
+- StandardRepository: in-memory data store and indexes
+"""
+
+from .standard_service import (
+    StandardMatchingService,
+    StandardMatcher,
+    StandardRepository,
+    StandardMatchResult,
+    StandardRecord,
+    ValidityStatus,
+    MatchResultCode,
+)
+from .standard_dao import StandardDAO
+
+__all__ = [
+    'StandardMatchingService',
+    'StandardMatcher',
+    'StandardRepository',
+    'StandardMatchResult',
+    'StandardRecord',
+    'StandardDAO',
+    'ValidityStatus',
+    'MatchResultCode',
+]

+ 43 - 0
core/construction_review/component/standard_matching/standard_dao.py

@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+标准库数据访问对象
+用于从MySQL一次性加载所有标准数据到内存
+"""
+from typing import List, Dict
+
+
+class StandardDAO:
+    """Data-access object for the standards library - loads rows from MySQL."""
+
+    def __init__(self, db_pool):
+        # db_pool: an initialized AsyncMySQLPool exposing an async get_cursor() context manager
+        self.db_pool = db_pool
+        self.table_name = "t_samp_standard_base_info"
+
+    async def load_all_standards(self) -> List[Dict]:
+        """
+        Load every standard row from MySQL into memory in a single query.
+
+        Returns:
+            A list of standards, each dict containing:
+                - id: row id / sequence number
+                - standard_name: standard name (aliased from chinese_name)
+                - standard_number: standard number
+                - validity: validity flag (XH/SX/FZ)
+
+        Raises:
+            RuntimeError: when the query fails for any reason.
+        """
+        query = f"""
+            SELECT
+                id,
+                chinese_name AS standard_name,
+                standard_number,
+                validity
+            FROM {self.table_name}
+            WHERE validity IS NOT NULL
+        """
+        try:
+            async with self.db_pool.get_cursor() as cursor:
+                await cursor.execute(query)
+                results = await cursor.fetchall()
+                # NOTE(review): dict(row) assumes the cursor yields mapping rows
+                # (aiomysql DictCursor) - confirm the pool's cursor class.
+                return [dict(row) for row in results] if results else []
+        except Exception as e:
+            # NOTE(review): consider 'raise RuntimeError(...) from e' to keep the exception chain.
+            raise RuntimeError(f"加载标准库数据失败: {e}")

+ 706 - 0
core/construction_review/component/standard_matching/standard_service.py

@@ -0,0 +1,706 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+标准库匹配规则服务 - 内存处理版本
+实现施工方案审查-时效性审查的匹配逻辑
+
+架构:
+- StandardRepository: 内存数据存储和索引
+- StandardMatcher: 匹配规则逻辑
+- StandardMatchingService: 对外服务接口
+"""
+from typing import List, Dict, Optional
+from dataclasses import dataclass, field
+from enum import Enum
+
+from foundation.observability.logger.loggering import review_logger as logger
+
+
+class ValidityStatus(Enum):
+    """Validity flags as stored in the standards table."""
+    CURRENT = "XH"      # currently in force (现行)
+    TRIAL = "SX"        # trial implementation (试行)
+    ABOLISHED = "FZ"    # abolished (废止)
+
+
+class MatchResultCode(Enum):
+    """Status codes attached to a match result."""
+    OK = "OK"                       # standard is current and valid
+    SUBSTITUTED = "SUBSTITUTED"     # standard has been superseded by a current one
+    ABOLISHED = "ABOLISHED"         # abolished with no current substitute
+    MISMATCH = "MISMATCH"           # name and number do not match each other
+    NOT_FOUND = "NOT_FOUND"         # not present in the standards library
+
+
+@dataclass
+class StandardMatchResult:
+    """Result record produced for one matched standard."""
+    seq_no: int = 0                             # sequence number of the input item
+    original_name: str = ""                      # input standard name (after cleaning)
+    original_number: str = ""                    # input standard number (after cleaning)
+    substitute_number: Optional[str] = None      # substitute standard number, if any
+    substitute_name: Optional[str] = None        # substitute standard name, if any
+    process_result: str = ""                     # processing-result label (Chinese)
+    status_code: str = ""                        # one of MatchResultCode values
+    final_result: str = ""                       # final human-readable message
+
+
+@dataclass
+class StandardRecord:
+    """One row of the standards library held in memory."""
+    id: int
+    standard_name: str
+    standard_number: str
+    validity: str  # validity flag: XH / SX / FZ (see ValidityStatus)
+
+
+class StandardRepository:
+    """
+    In-memory repository for the standards library.
+    Loads and indexes standard records to support fast lookups.
+    """
+
+    def __init__(self):
+        # Raw record list
+        self._records: List[StandardRecord] = []
+
+        # Index structures to speed up queries
+        self._number_index: Dict[str, StandardRecord] = {}  # standard number -> record
+        self._name_index: Dict[str, List[StandardRecord]] = {}  # name -> list of records
+        self._current_records: List[StandardRecord] = []  # in-force / trial standards
+
+    def load_data(self, raw_data: List[Dict]):
+        """
+        Load raw rows into memory and rebuild all indexes.
+
+        Args:
+            raw_data: raw standard rows queried from the database
+        """
+        self._records = []
+        self._number_index = {}
+        self._name_index = {}
+        self._current_records = []
+
+        for item in raw_data:
+            # Skip rows missing either key field
+            standard_number = item.get("standard_number")
+            standard_name = item.get("standard_name")
+            if not standard_number or not standard_name:
+                continue
+
+            record = StandardRecord(
+                id=item.get("id", 0),
+                standard_name=standard_name,
+                standard_number=standard_number,
+                validity=item.get("validity", "")
+            )
+            self._records.append(record)
+
+            # Build the standard-number index.
+            # NOTE(review): duplicate numbers silently overwrite earlier rows -
+            # confirm standard_number is unique in the table.
+            self._number_index[record.standard_number] = record
+
+            # Build the name index (one name may map to several numbers)
+            if record.standard_name not in self._name_index:
+                self._name_index[record.standard_name] = []
+            self._name_index[record.standard_name].append(record)
+
+            # Collect standards that are currently in force or on trial
+            if record.validity in [ValidityStatus.CURRENT.value, ValidityStatus.TRIAL.value]:
+                self._current_records.append(record)
+
+        # Sort current standards by standard number, descending (used to pick
+        # the "newest" substitute).
+        # NOTE(review): this is a lexicographic string sort; it only
+        # approximates "newest first" - confirm number formats sort as intended.
+        # The "or ''" guards against possible None values.
+        self._current_records.sort(
+            key=lambda r: r.standard_number or "",
+            reverse=True
+        )
+        logger.info(f"标准库数据加载完成: {len(self._records)} 条记录")
+
+    def find_by_number_exact(self, standard_number: str) -> Optional[StandardRecord]:
+        """Exact lookup by standard number."""
+        return self._number_index.get(standard_number)
+
+    def find_by_name_exact(self, standard_name: str) -> Optional[StandardRecord]:
+        """Exact lookup by standard name (returns only the first record)."""
+        records = self._name_index.get(standard_name, [])
+        return records[0] if records else None
+
+    def find_by_name_fuzzy(self, standard_name: str) -> List[StandardRecord]:
+        """Fuzzy lookup by name: matches when either string contains the other."""
+        results = []
+        for name, records in self._name_index.items():
+            if standard_name in name or name in standard_name:
+                results.extend(records)
+        return results
+
+    def find_by_number_fuzzy(self, standard_number: str) -> List[StandardRecord]:
+        """Fuzzy lookup by number: prefix match on the part before the year suffix."""
+        results = []
+        # Extract the prefix (e.g. "GB/T 5972" from "GB/T 5972-2016")
+        parts = standard_number.split("-")
+        prefix = parts[0] if parts else standard_number
+
+        for number, record in self._number_index.items():
+            # Prefix match
+            if number.startswith(prefix):
+                results.append(record)
+        return results
+
+    def find_current_by_name(self, standard_name: str) -> List[StandardRecord]:
+        """Find in-force/trial standards with the given name (fuzzy match allowed)."""
+        results = []
+        for record in self._current_records:
+            # Exact match
+            if record.standard_name == standard_name:
+                results.append(record)
+            # Fuzzy match (ignores spaces, book-title marks, etc.)
+            elif self._is_name_fuzzy_match_for_repo(record.standard_name, standard_name):
+                results.append(record)
+        return results
+
+    def _is_name_fuzzy_match_for_repo(self, name1: str, name2: str) -> bool:
+        """True when names are equal after stripping 《》 and (half/full-width) spaces."""
+        # NOTE(review): unlike StandardMatcher._is_name_fuzzy_match, this also
+        # removes interior spaces - confirm the looser rule is intended here.
+        clean1 = name1.replace("《", "").replace("》", "").replace(" ", "").replace("　", "")
+        clean2 = name2.replace("《", "").replace("》", "").replace(" ", "").replace("　", "")
+        return clean1 == clean2
+
+    def get_all_records(self) -> List[StandardRecord]:
+        """Return a shallow copy of all loaded records."""
+        return self._records.copy()
+
+
+class StandardMatcher:
+    """
+    Standard matcher.
+    Implements the core matching rules against the in-memory repository.
+    """
+
+    def __init__(self, repository: StandardRepository):
+        self.repo = repository
+
+    def match(self, seq_no: int, input_name: str, input_number: str) -> StandardMatchResult:
+        """
+        Run the matching pipeline for one standard.
+
+        Flow:
+        1. Exact match on the standard number.
+        2. Branch on the outcome (number matched / not matched).
+        """
+        # Trim surrounding whitespace
+        input_name = input_name.strip() if input_name else input_name
+        input_number = input_number.strip() if input_number else input_number
+
+        # Strip surrounding book-title marks and brackets
+        input_name = self._clean_brackets_and_booknames(input_name)
+        input_number = self._clean_brackets_and_booknames(input_number)
+
+        # Note: the *cleaned* values are recorded as original_name/number.
+        result = StandardMatchResult(
+            seq_no=seq_no,
+            original_name=input_name,
+            original_number=input_number
+        )
+
+        # Step 1: exact match on the standard number
+        match_by_number = self.repo.find_by_number_exact(input_number)
+
+        if match_by_number:
+            # Branch A: number matched
+            return self._handle_number_matched(result, match_by_number, input_name)
+        else:
+            # Branch B: number not matched
+            return self._handle_number_not_matched(result, input_name, input_number)
+
+    def _handle_number_matched(
+        self,
+        result: StandardMatchResult,
+        db_record: StandardRecord,
+        input_name: str
+    ) -> StandardMatchResult:
+        """Handle the case where the standard number matched exactly."""
+        # Check whether the name matches too
+        if db_record.standard_name == input_name:
+            # Name also matches
+            return self._handle_full_match(result, db_record)
+        else:
+            # Name does not match
+            return self._handle_name_mismatch(result, db_record, input_name)
+
+    def _handle_full_match(
+        self,
+        result: StandardMatchResult,
+        db_record: StandardRecord
+    ) -> StandardMatchResult:
+        """Handle the case where both name and number match exactly."""
+        if db_record.validity in [ValidityStatus.CURRENT.value, ValidityStatus.TRIAL.value]:
+            # Case 1: in force or trial - status is normal
+            return self._set_ok_result(result)
+        else:
+            # Abolished - look for a substitute standard
+            return self._handle_abolished(result, db_record)
+
+    def _handle_name_mismatch(
+        self,
+        result: StandardMatchResult,
+        db_record: StandardRecord,
+        input_name: str
+    ) -> StandardMatchResult:
+        """Handle the case where the number matched but the name did not."""
+        # First check whether the names fuzzy-match (ignoring book-title marks)
+        if self._is_name_fuzzy_match(db_record.standard_name, input_name):
+            # Fuzzy name match succeeded - treat as a full match
+            return self._handle_full_match(result, db_record)
+
+        # Try a fuzzy lookup with the input name
+        name_matches = self.repo.find_by_name_fuzzy(input_name)
+
+        # Look for an exact name match among the candidates
+        exact_match = self._find_exact_name_match(name_matches, input_name)
+
+        if exact_match:
+            # Found a record whose name matches the input
+            return self._handle_fuzzy_name_match(result, exact_match)
+
+        # Try fuzzy name equality among the candidates
+        for match in name_matches:
+            if self._is_name_fuzzy_match(match.standard_name, input_name):
+                return self._handle_fuzzy_name_match(result, match)
+
+        # Name did not match anything, but the number *did* match a record -
+        # so the standard exists; report MISMATCH rather than NOT_FOUND.
+        if db_record.validity in [ValidityStatus.CURRENT.value, ValidityStatus.TRIAL.value]:
+            return self._set_mismatch_result(result, db_record)
+        elif db_record.validity == ValidityStatus.ABOLISHED.value:
+            return self._handle_abolished(result, db_record)
+
+        # NOTE(review): reached only for unexpected validity values; falls back
+        # to NOT_FOUND even though the number matched - confirm intended.
+        return self._set_not_found_result(result)
+
+    def _handle_number_not_matched(
+        self,
+        result: StandardMatchResult,
+        input_name: str,
+        input_number: str
+    ) -> StandardMatchResult:
+        """Handle the case where the standard number matched nothing exactly."""
+        # Try a fuzzy (prefix) match on the number
+        fuzzy_number_matches = self.repo.find_by_number_fuzzy(input_number)
+
+        if fuzzy_number_matches:
+            # Check for a name match among those candidates
+            return self._check_name_in_records(result, fuzzy_number_matches, input_name)
+        else:
+            # Fall back to a pure name-based search
+            return self._search_by_name_only(result, input_name)
+
+    def _check_name_in_records(
+        self,
+        result: StandardMatchResult,
+        records: List[StandardRecord],
+        input_name: str
+    ) -> StandardMatchResult:
+        """Search a batch of records for a name match (exact first, then fuzzy)."""
+        # First try an exact name match
+        for record in records:
+            if record.standard_name == input_name:
+                # Name matched - inspect the validity status
+                if record.validity in [ValidityStatus.CURRENT.value, ValidityStatus.TRIAL.value]:
+                    return self._set_mismatch_result(result, record)
+                elif record.validity == ValidityStatus.ABOLISHED.value:
+                    return self._handle_abolished(result, record)
+
+        # Then try a fuzzy name match (ignoring book-title marks)
+        for record in records:
+            if self._is_name_fuzzy_match(record.standard_name, input_name):
+                # Fuzzy name match succeeded
+                if record.validity in [ValidityStatus.CURRENT.value, ValidityStatus.TRIAL.value]:
+                    return self._set_mismatch_result(result, record)
+                elif record.validity == ValidityStatus.ABOLISHED.value:
+                    return self._handle_abolished(result, record)
+
+        # No name matched
+        return self._set_not_found_result(result)
+
+    def _search_by_name_only(
+        self,
+        result: StandardMatchResult,
+        input_name: str
+    ) -> StandardMatchResult:
+        """Search by name alone (number matched nothing, even fuzzily)."""
+        # Exact name match
+        name_match = self.repo.find_by_name_exact(input_name)
+
+        if name_match:
+            if name_match.validity in [ValidityStatus.CURRENT.value, ValidityStatus.TRIAL.value]:
+                return self._set_mismatch_result(result, name_match)
+            elif name_match.validity == ValidityStatus.ABOLISHED.value:
+                # NOTE(review): other paths route abolished records through
+                # _handle_abolished to look for a substitute; here an abolished
+                # exact-name match returns NOT_FOUND instead - confirm intended.
+                return self._set_not_found_result(result)
+
+        # Fuzzy name match
+        fuzzy_matches = self.repo.find_by_name_fuzzy(input_name)
+
+        # First try an exact match among the candidates
+        exact_match = self._find_exact_name_match(fuzzy_matches, input_name)
+        if exact_match:
+            if exact_match.validity in [ValidityStatus.CURRENT.value, ValidityStatus.TRIAL.value]:
+                return self._set_mismatch_result(result, exact_match)
+
+        # Then try fuzzy name equality (ignoring book-title marks etc.)
+        for match in fuzzy_matches:
+            if self._is_name_fuzzy_match(match.standard_name, input_name):
+                if match.validity in [ValidityStatus.CURRENT.value, ValidityStatus.TRIAL.value]:
+                    return self._set_mismatch_result(result, match)
+                elif match.validity == ValidityStatus.ABOLISHED.value:
+                    return self._handle_abolished(result, match)
+
+        return self._set_not_found_result(result)
+
+    def _handle_fuzzy_name_match(
+        self,
+        result: StandardMatchResult,
+        match_record: StandardRecord
+    ) -> StandardMatchResult:
+        """Handle a successful (fuzzy) name match found via name lookup."""
+        if match_record.validity in [ValidityStatus.CURRENT.value, ValidityStatus.TRIAL.value]:
+            return self._set_mismatch_result(result, match_record)
+        elif match_record.validity == ValidityStatus.ABOLISHED.value:
+            return self._handle_abolished(result, match_record)
+        return self._set_not_found_result(result)
+
+    def _handle_abolished(
+        self,
+        result: StandardMatchResult,
+        abolished_record: StandardRecord
+    ) -> StandardMatchResult:
+        """Handle an abolished standard: look for a current substitute by name."""
+        # Query current standards with the same name as candidates
+        substitutes = self.repo.find_current_by_name(abolished_record.standard_name)
+
+        if substitutes:
+            # A substitute exists - take the newest (list is number-descending)
+            latest = substitutes[0]
+            return self._set_substituted_result(result, latest)
+        else:
+            # No substitute available
+            return self._set_abolished_result(result)
+
+    # ========== Formatting helpers ==========
+
+    def _format_standard_name(self, name: str) -> str:
+        """Format a standard name so it is wrapped in exactly one pair of 《》."""
+        if not name:
+            return name
+        name = name.strip()
+        # Drop any existing book-title marks
+        while name.startswith('《'):
+            name = name[1:]
+        while name.endswith('》'):
+            name = name[:-1]
+        return f"《{name}》"
+
+    def _format_standard_number(self, number: str) -> str:
+        """Format a standard number so it is wrapped in full-width parentheses."""
+        if not number:
+            return number
+        number = number.strip()
+        # Drop one existing leading/trailing bracket (half- or full-width)
+        if number.startswith('(') or number.startswith('('):
+            number = number[1:]
+        if number.endswith(')') or number.endswith(')'):
+            number = number[:-1]
+        return f"({number})"
+
+    # ========== Result setters (one responsibility each) ==========
+
+    def _set_ok_result(self, result: StandardMatchResult) -> StandardMatchResult:
+        """Mark the result as normal (standard is current/valid)."""
+        result.process_result = "正常"
+        result.status_code = MatchResultCode.OK.value
+        result.final_result = "无问题"
+        return result
+
+    def _set_substituted_result(
+        self,
+        result: StandardMatchResult,
+        substitute: StandardRecord
+    ) -> StandardMatchResult:
+        """Mark the result as superseded, recording the substitute standard."""
+        result.substitute_name = self._format_standard_name(substitute.standard_name)
+        result.substitute_number = self._format_standard_number(substitute.standard_number)
+        result.process_result = "被替代"
+        result.status_code = MatchResultCode.SUBSTITUTED.value
+        result.final_result = (
+            f"{self._format_standard_name(result.original_name)}"
+            f"{self._format_standard_number(result.original_number)}已废止,"
+            f"替代{self._format_standard_name(substitute.standard_name)}"
+            f"{self._format_standard_number(substitute.standard_number)}"
+        )
+        return result
+
+    def _set_abolished_result(self, result: StandardMatchResult) -> StandardMatchResult:
+        """Mark the result as abolished with no current substitute."""
+        result.process_result = "废止无现行"
+        result.status_code = MatchResultCode.ABOLISHED.value
+        result.final_result = (
+            f"{self._format_standard_name(result.original_name)}"
+            f"{self._format_standard_number(result.original_number)}已废止,无现行状态"
+        )
+        return result
+
+    def _set_mismatch_result(
+        self,
+        result: StandardMatchResult,
+        actual: StandardRecord
+    ) -> StandardMatchResult:
+        """Mark the result as mismatched, recording the actual library record."""
+        result.substitute_name = self._format_standard_name(actual.standard_name)
+        result.substitute_number = self._format_standard_number(actual.standard_number)
+        result.process_result = "不匹配"
+        result.status_code = MatchResultCode.MISMATCH.value
+        result.final_result = (
+            f"{self._format_standard_name(result.original_name)}"
+            f"{self._format_standard_number(result.original_number)}"
+            f"与实际{self._format_standard_name(actual.standard_name)}"
+            f"{self._format_standard_number(actual.standard_number)}不匹配"
+        )
+        return result
+
+    def _set_not_found_result(self, result: StandardMatchResult) -> StandardMatchResult:
+        """Mark the result as absent from the standards library."""
+        result.process_result = "标准库不存在"
+        result.status_code = MatchResultCode.NOT_FOUND.value
+        result.final_result = (
+            f"{self._format_standard_name(result.original_name)}"
+            f"{self._format_standard_number(result.original_number)}标准库不存在,请确认"
+        )
+        return result
+
+    # ========== Utility helpers ==========
+
+    def _is_name_fuzzy_match(self, name1: str, name2: str) -> bool:
+        """
+        Check whether two names fuzzy-match.
+        Only book-title marks are stripped; interior spaces are kept
+        (they are considered part of the name).
+        """
+        # Strip book-title marks but keep interior spaces
+        clean1 = name1.replace("《", "").replace("》", "")
+        clean2 = name2.replace("《", "").replace("》", "")
+        return clean1 == clean2
+
+    def _clean_brackets_and_booknames(self, text: str) -> str:
+        """
+        Strip leading/trailing book-title marks and brackets:
+        《 》 ( ) ( )
+        """
+        if not text:
+            return text
+
+        # Loop until no more leading/trailing marks are removed
+        changed = True
+        while changed:
+            changed = False
+            original = text  # NOTE(review): assigned but never used - dead code
+
+            # Strip leading marks
+            if text.startswith("《"):
+                text = text[1:]
+                changed = True
+            if text.startswith("》"):
+                text = text[1:]
+                changed = True
+            if text.startswith("("):
+                text = text[1:]
+                changed = True
+            if text.startswith(")"):
+                text = text[1:]
+                changed = True
+            if text.startswith("("):
+                text = text[1:]
+                changed = True
+            if text.startswith(")"):
+                text = text[1:]
+                changed = True
+
+            # Strip trailing marks
+            if text.endswith("《"):
+                text = text[:-1]
+                changed = True
+            if text.endswith("》"):
+                text = text[:-1]
+                changed = True
+            if text.endswith("("):
+                text = text[:-1]
+                changed = True
+            if text.endswith(")"):
+                text = text[:-1]
+                changed = True
+            if text.endswith("("):
+                text = text[:-1]
+                changed = True
+            if text.endswith(")"):
+                text = text[:-1]
+                changed = True
+
+            # Stop if the text has been emptied out
+            if not text:
+                break
+
+        return text
+
+    def _find_exact_name_match(
+        self,
+        records: List[StandardRecord],
+        target_name: str
+    ) -> Optional[StandardRecord]:
+        """Return the first record in the list whose name equals target_name."""
+        for record in records:
+            if record.standard_name == target_name:
+                return record
+        return None
+
+
+class StandardMatchingService:
+    """
+    Standards-library matching service.
+    The unified public entry point for this module.
+    """
+
+    def __init__(self, db_pool=None, own_db_pool: bool = False):
+        """
+        Initialize the service.
+
+        Args:
+            db_pool: database connection pool (required; mock mode was removed)
+            own_db_pool: whether this service owns the pool (close() then closes it)
+
+        Raises:
+            RuntimeError: when db_pool is None.
+        """
+        if not db_pool:
+            raise RuntimeError(
+                "StandardMatchingService 初始化失败: 必须提供数据库连接池(db_pool)。\n"
+                "Mock模式已取消,请确保数据库连接正常。"
+            )
+        self.db_pool = db_pool
+        self._own_db_pool = own_db_pool  # ownership flag for the pool
+        self.repository = StandardRepository()
+        self.matcher = StandardMatcher(self.repository)
+        self._initialized = False
+
+    async def initialize(self):
+        """
+        Initialize: load data from the database into memory.
+        Needs to run only once; subsequent calls are no-ops.
+
+        Raises:
+            RuntimeError: when the database pool is None (mock mode removed).
+        """
+        if self._initialized:
+            return
+
+        # NOTE(review): unreachable in practice - the constructor already
+        # raises when db_pool is falsy; kept as a defensive double-check.
+        if not self.db_pool:
+            raise RuntimeError(
+                "标准匹配服务初始化失败: 数据库连接池(db_pool)为None。\n"
+                "请检查:\n"
+                "  1. MySQL数据库配置是否正确\n"
+                "  2. 数据库服务是否正常运行\n"
+                "  3. 网络连接是否正常"
+            )
+
+        # Load from the real database
+        from .standard_dao import StandardDAO
+        dao = StandardDAO(self.db_pool)
+        raw_data = await dao.load_all_standards()
+        logger.info(f"从数据库加载标准数据: {len(raw_data)} 条")
+
+        self.repository.load_data(raw_data)
+        self._initialized = True
+        logger.info("标准匹配服务初始化完成")
+
+    async def close(self):
+        """Shut down the service and release resources."""
+        # Only close the pool when this service owns it
+        if self._own_db_pool and self.db_pool:
+            await self.db_pool.close()
+        self._initialized = False
+
+    def check_standards(self, standards: List[Dict[str, str]]) -> List[StandardMatchResult]:
+        """
+        Check a batch of standards. Synchronous: matching is pure in-memory work.
+
+        Args:
+            standards: list of dicts, each containing:
+                - standard_name: raw standard name
+                - standard_number: raw standard number
+
+        Returns:
+            List[StandardMatchResult]: one result per input, in order.
+
+        Raises:
+            RuntimeError: when called before initialize().
+        """
+        if not self._initialized:
+            raise RuntimeError("服务未初始化,请先调用 initialize()")
+
+        results = []
+        for idx, std in enumerate(standards, start=1):
+            result = self.matcher.match(
+                seq_no=idx,
+                input_name=std.get("standard_name", ""),
+                input_number=std.get("standard_number", "")
+            )
+            results.append(result)
+        return results
+
+    def check_single(
+        self,
+        seq_no: int,
+        standard_name: str,
+        standard_number: str
+    ) -> StandardMatchResult:
+        """
+        Check a single standard.
+
+        Args:
+            seq_no: sequence number to stamp on the result
+            standard_name: standard name
+            standard_number: standard number
+
+        Returns:
+            StandardMatchResult: the match result
+
+        Raises:
+            RuntimeError: when called before initialize().
+        """
+        if not self._initialized:
+            raise RuntimeError("服务未初始化,请先调用 initialize()")
+
+        return self.matcher.match(seq_no, standard_name, standard_number)
+
+
+# Global service instance (singleton)
+_standard_matching_service: Optional[StandardMatchingService] = None
+
+
+async def get_standard_matching_service(db_pool=None) -> StandardMatchingService:
+    """
+    Get the standard-matching service instance (lazily created singleton).
+
+    Args:
+        db_pool: database connection pool (required)
+
+    Returns:
+        StandardMatchingService: the shared service instance
+
+    Raises:
+        RuntimeError: when db_pool is None (mock mode removed).
+
+    NOTE(review): a db_pool passed on later calls is ignored once the singleton
+    exists; creation is also not guarded against concurrent first calls.
+    """
+    if not db_pool:
+        raise RuntimeError(
+            "获取标准匹配服务失败: 必须提供数据库连接池(db_pool)。\n"
+            "Mock模式已取消,请确保数据库连接正常。"
+        )
+
+    global _standard_matching_service
+    if _standard_matching_service is None:
+        _standard_matching_service = StandardMatchingService(db_pool)
+        await _standard_matching_service.initialize()
+    return _standard_matching_service
+
+
+def reset_standard_matching_service():
+    """Reset the cached singleton instance (mainly for tests)."""
+    global _standard_matching_service
+    _standard_matching_service = None

+ 26 - 6
core/construction_review/workflows/ai_review_workflow.py

@@ -75,7 +75,7 @@ class AIReviewWorkflow:
     """基于LangGraph的AI审查工作流"""
     """基于LangGraph的AI审查工作流"""
 
 
     def __init__(self, task_file_info: TaskFileInfo, structured_content: Dict[str, Any],
     def __init__(self, task_file_info: TaskFileInfo, structured_content: Dict[str, Any],
-                 progress_manager=None, max_review_units: int = None, review_mode: str = "all"):
+                 progress_manager=None, max_review_units: int = None, review_mode: str = "all", db_pool=None):
         """
         """
         初始化AI审查工作流
         初始化AI审查工作流
 
 
@@ -85,13 +85,14 @@ class AIReviewWorkflow:
             progress_manager: 进度管理器
             progress_manager: 进度管理器
             max_review_units: 最大审查单元数量(None表示审查所有)
             max_review_units: 最大审查单元数量(None表示审查所有)
             review_mode: 审查模式 ("all"=全部, "first"=前N个, "random"=随机N个)
             review_mode: 审查模式 ("all"=全部, "first"=前N个, "random"=随机N个)
+            db_pool: 数据库连接池(用于时效性审查等新逻辑)
         """
         """
         # 工作流超时时间定义
         # 工作流超时时间定义
         self.WORKFLOW_TIMEOUT = 3600
         self.WORKFLOW_TIMEOUT = 3600
 
 
         # 任务文件信息
         # 任务文件信息
         self.task_info = task_file_info
         self.task_info = task_file_info
-        
+
         self.file_id = task_file_info.file_id
         self.file_id = task_file_info.file_id
         self.callback_task_id = task_file_info.callback_task_id
         self.callback_task_id = task_file_info.callback_task_id
         self.user_id = task_file_info.user_id
         self.user_id = task_file_info.user_id
@@ -101,8 +102,8 @@ class AIReviewWorkflow:
         self.structured_content = structured_content
         self.structured_content = structured_content
         self.progress_manager = progress_manager
         self.progress_manager = progress_manager
 
 
-        # 传递 TaskFileInfo 实例
-        self.ai_review_engine = AIReviewEngine(task_file_info)
+        # 传递 TaskFileInfo 实例和 db_pool
+        self.ai_review_engine = AIReviewEngine(task_file_info, db_pool=db_pool)
 
 
         # 初始化核心功能和工具类
         # 初始化核心功能和工具类
         self.core_fun = AIReviewCoreFun(task_file_info, self.ai_review_engine, max_review_units, review_mode)
         self.core_fun = AIReviewCoreFun(task_file_info, self.ai_review_engine, max_review_units, review_mode)
@@ -302,10 +303,29 @@ class AIReviewWorkflow:
 
 
             # 获取审查项配置
             # 获取审查项配置
             review_item_config_raw = self.task_info.get_review_item_config_list()
             review_item_config_raw = self.task_info.get_review_item_config_list()
-            
+
             # 将review_item_config中的值拆分成chapter_code和func_name 如{['basis':["sensitive_word_check","timeliness_basis_reviewer"]]}
             # 将review_item_config中的值拆分成chapter_code和func_name 如{['basis':["sensitive_word_check","timeliness_basis_reviewer"]]}
             review_item_config = self.core_fun._replace_review_suffix(review_item_config_raw, review_func_mapping)
             review_item_config = self.core_fun._replace_review_suffix(review_item_config_raw, review_func_mapping)
-            
+
+            # 【新增】处理时效性审查的章节映射:
+            # - basis 章节使用 timeliness_basis_reviewer(编制依据时效性)
+            # - 其他章节使用 timeliness_content_reviewer(内容时效性)
+            processed_config = []
+            for item in review_item_config:
+                if '_' in item:
+                    chapter_code, func_name = item.split('_', 1)
+                    # 如果是时效性审查,根据章节选择正确的审查器
+                    if func_name == 'timeliness_basis_reviewer':
+                        if chapter_code == 'basis':
+                            # basis 章节保持使用 timeliness_basis_reviewer
+                            processed_config.append(item)
+                        else:
+                            # 其他章节使用 timeliness_content_reviewer
+                            processed_config.append(f"{chapter_code}_timeliness_content_reviewer")
+                        continue
+                processed_config.append(item)
+            review_item_config = processed_config
+
             # 根据标准配置对review_item_config进行排序
             # 根据标准配置对review_item_config进行排序
             review_item_dict_sorted = self.core_fun._check_item_mapping_order(review_item_config)
             review_item_dict_sorted = self.core_fun._check_item_mapping_order(review_item_config)
             logger.info(f"审查项配置解析完成: {review_item_dict_sorted}")
             logger.info(f"审查项配置解析完成: {review_item_dict_sorted}")

+ 35 - 22
foundation/infrastructure/mysql/async_mysql_conn_pool.py

@@ -1,3 +1,4 @@
+import asyncio
 import aiomysql
 import aiomysql
 from contextlib import asynccontextmanager
 from contextlib import asynccontextmanager
 from typing import  Dict,Optional, AsyncGenerator
 from typing import  Dict,Optional, AsyncGenerator
@@ -8,36 +9,48 @@ from foundation.infrastructure.config import config_handler
 # 异步数据库连接池
 # 异步数据库连接池
 class AsyncMySQLPool:
 class AsyncMySQLPool:
     _instance = None
     _instance = None
-    
+
     def __new__(cls, *args, **kwargs):
     def __new__(cls, *args, **kwargs):
         if not cls._instance:
         if not cls._instance:
             cls._instance = super().__new__(cls)
             cls._instance = super().__new__(cls)
         return cls._instance
         return cls._instance
-    
+
     def __init__(self):
     def __init__(self):
         if not hasattr(self, '_pool'):
         if not hasattr(self, '_pool'):
             self._pool = None
             self._pool = None
             self._initialized = False
             self._initialized = False
-    
-    async def initialize(self):
-        """初始化连接池"""
-        try:
-            
-            self._pool = await aiomysql.create_pool(
-                host=config_handler.get("mysql", "MYSQL_HOST" , "localhost"),
-                port=int(config_handler.get("mysql", "MYSQL_PORT" , "3306")),
-                user=config_handler.get("mysql", "MYSQL_USER"),
-                password=config_handler.get("mysql", "MYSQL_PASSWORD"),
-                db=config_handler.get("mysql", "MYSQL_DB"),
-                minsize=int(config_handler.get("mysql", "MYSQL_MIN_SIZE" , "1")),
-                maxsize=int(config_handler.get("mysql", "MYSQL_MAX_SIZE" , "2")),
-                autocommit=config_handler.get("mysql", "MYSQL_AUTO_COMMIT")
-            )
-            self._initialized = True
-            server_logger.info("异步MySQL连接池初始化成功")
-        except Exception as e:
-            server_logger.error(f"连接池初始化失败: {e}")
-            raise
+
+    async def initialize(self, max_retries=3, retry_delay=2):
+        """初始化连接池,支持重试"""
+        last_error = None
+
+        for attempt in range(1, max_retries + 1):
+            try:
+                server_logger.info(f"尝试初始化MySQL连接池 (第{attempt}/{max_retries}次)...")
+
+                self._pool = await aiomysql.create_pool(
+                    host=config_handler.get("mysql", "MYSQL_HOST" , "localhost"),
+                    port=int(config_handler.get("mysql", "MYSQL_PORT" , "3306")),
+                    user=config_handler.get("mysql", "MYSQL_USER"),
+                    password=config_handler.get("mysql", "MYSQL_PASSWORD"),
+                    db=config_handler.get("mysql", "MYSQL_DB"),
+                    minsize=int(config_handler.get("mysql", "MYSQL_MIN_SIZE" , "1")),
+                    maxsize=int(config_handler.get("mysql", "MYSQL_MAX_SIZE" , "2")),
+                    autocommit=config_handler.get("mysql", "MYSQL_AUTO_COMMIT"),
+                    connect_timeout=int(config_handler.get("mysql", "MYSQL_CONNECT_TIMEOUT", "30"))
+                )
+                self._initialized = True
+                server_logger.info("异步MySQL连接池初始化成功")
+                return
+            except Exception as e:
+                last_error = e
+                server_logger.warning(f"连接池初始化失败 (第{attempt}次): {e}")
+                if attempt < max_retries:
+                    server_logger.info(f"{retry_delay}秒后重试...")
+                    await asyncio.sleep(retry_delay)
+
+        server_logger.error(f"连接池初始化失败,已重试{max_retries}次: {last_error}")
+        raise last_error
     
     
     async def close(self):
     async def close(self):
         """关闭连接池"""
         """关闭连接池"""

+ 0 - 141
test_content_timeliness.py

@@ -1,141 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-测试内容时效性审查是否正确处理 JTG B01-2011 的情况
-"""
-
-import json
-import asyncio
-from core.construction_review.component.reviewers.timeliness_content_reviewer import (
-    StandardExtractor, ContentTimelinessReviewer
-)
-
# Test data - mirrors the situation captured in problem.json
# (one tertiary section citing a superseded standard, JTG B01-2011).
test_tertiary_details = [
    {
        "third_category_name": "国家方针、政策、标准和设计文件",
        "third_category_code": "NationalPoliciesStandardsAndDesignDocument",
        "start_line": 80,
        "end_line": 82,
        "content": """<80> 国家方针、政策、标准和设计文件
<81> 《公路工程技术标准》(JTG B01-2011)
<82> 《公路桥涵设计通用规范》(JTG D60-2015)"""
    }
]
-
# Extractor smoke test
def test_extractor():
    """Run StandardExtractor over the sample tertiary details and print results.

    Returns:
        list: all standard references extracted across every detail entry.
        (The original returned only the last iteration's ``refs`` and raised
        NameError when ``test_tertiary_details`` was empty; accumulating
        fixes both while remaining identical for the single-entry fixture.)
    """
    print("=" * 60)
    print("测试规范提取器")
    print("=" * 60)

    extractor = StandardExtractor()

    all_refs = []  # accumulate so an empty input no longer breaks the return
    for detail in test_tertiary_details:
        refs = extractor.extract_from_content(detail["content"])
        print(f"\n从 '{detail['third_category_name']}' 提取到 {len(refs)} 个规范引用:")
        for ref in refs:
            print(f"  - 原始文本: {ref.original_text}")
            print(f"    名称: {ref.name}")
            print(f"    编号: {ref.number}")
            print(f"    上下文: {ref.context[:100]}...")
        all_refs.extend(refs)

    return all_refs
-
# Filter-rule comparison test
def test_filter_logic():
    """Compare the legacy and revised filter predicates on canned match data.

    Prints a report showing that the legacy rule (keep only rows with a
    non-empty ``exact_match_info``) drops entries the revised rule keeps.
    """
    print("\n" + "=" * 60)
    print("测试过滤逻辑")
    print("=" * 60)

    # Canned rows shaped like match_reference_files output.
    samples = [
        {
            "review_item": "《公路工程技术标准》(JTG B01-2011)",
            "has_related_file": True,
            "has_exact_match": False,
            "exact_match_info": "",
            "same_name_current": "《公路工程技术标准》(JTG B01-2014)状态为现行"
        },
        {
            "review_item": "《公路桥涵设计通用规范》(JTG D60-2015)",
            "has_related_file": True,
            "has_exact_match": True,
            "exact_match_info": "《公路桥涵设计通用规范》(JTG D60-2015)状态为现行",
            "same_name_current": ""
        }
    ]

    print("\n模拟 match_reference_files 返回数据:")
    for pos, row in enumerate(samples):
        print(f"\n  项{pos}:")
        for key in ("review_item", "has_related_file", "has_exact_match",
                    "exact_match_info", "same_name_current"):
            print(f"    {key}: {row[key]}")

    # Legacy rule: keep only rows whose exact_match_info is non-empty.
    legacy_kept = list(filter(lambda row: row.get('exact_match_info'), samples))
    print(f"\n旧过滤逻辑(只保留 exact_match_info 不为空的): {len(legacy_kept)} 个项")
    for row in legacy_kept:
        print(f"  - {row['review_item']}")

    # Revised rule: keep any row carrying some related information.
    revised_kept = [
        row for row in samples
        if row.get('has_related_file') or
           row.get('exact_match_info') or
           row.get('same_name_current')
    ]
    print(f"\n新过滤逻辑(保留有相关信息的): {len(revised_kept)} 个项")
    for row in revised_kept:
        print(f"  - {row['review_item']}")

    # Rows the legacy rule silently dropped.
    overlooked = [row for row in samples if row not in legacy_kept]
    if overlooked:
        print(f"\n[警告] 旧逻辑漏检的项:")
        for row in overlooked:
            print(f"  - {row['review_item']}")
            print(f"    has_related_file: {row['has_related_file']}")
            print(f"    same_name_current: {row['same_name_current']}")
-
# Full end-to-end test (needs live services)
async def test_full_review():
    """Run the full content-timeliness review against the sample details.

    Requires a reachable Milvus instance; any failure is printed with a
    traceback rather than propagated, so the script keeps running.
    """
    print("\n" + "=" * 60)
    print("完整审查测试(需要 Milvus 连接)")
    print("=" * 60)

    try:
        async with ContentTimelinessReviewer(max_concurrent=4) as reviewer:
            results = await reviewer.review_tertiary_content(
                tertiary_details=test_tertiary_details,
                collection_name="first_bfp_collection_status"
            )

            print(f"\n审查完成,共 {len(results)} 个结果:")
            for pos, entry in enumerate(results):
                print(f"\n  结果{pos}:")
                for field in ('check_item', 'exist_issue', 'risk_info'):
                    print(f"    {field}: {entry.get(field)}")
                nested = entry.get('check_result', {})
                for field in ('issue_point', 'suggestion', 'reason'):
                    print(f"    {field}: {nested.get(field)}")

    except Exception as exc:
        import traceback
        print(f"测试失败: {exc}")
        traceback.print_exc()
-
if __name__ == "__main__":
    # Run the extractor smoke test first.
    refs = test_extractor()

    # Then compare the legacy and revised filter rules.
    test_filter_logic()

    # Full end-to-end run needs external services; enable manually.
    # asyncio.run(test_full_review())