Ver código fonte

v0.0.4-功能更新&debug
- 修复任务ID过期时间问题,从2分钟调整为1小时
- 增加内容要点完整性审查
- 完成RAG父子文档召回整体链路
- 更新语义逻辑检查,使其不过于敏感

WangXuMing 1 mês atrás
pai
commit
d983b52c40

+ 2 - 2
core/base/redis_duplicate_checker.py

@@ -137,7 +137,7 @@ class RedisDuplicateChecker:
                         task_data = json.loads(task_info)
                         if task_data.get("callback_task_id") == callback_task_id:
                             created_at = datetime.fromisoformat(task_data['created_at'])
-                            if datetime.now() - created_at < timedelta(minutes=2):
+                            if datetime.now() - created_at < timedelta(hours=1):
                                 return True
                             else:
                                 # 任务已过期,清理
@@ -148,7 +148,7 @@ class RedisDuplicateChecker:
                 for file_id, task_info in self.task_cache.items():
                     if task_info.get("callback_task_id") == callback_task_id:
                         created_at = datetime.fromisoformat(task_info['created_at'])
-                        if datetime.now() - created_at < timedelta(minutes=2):
+                        if datetime.now() - created_at < timedelta(hours=1):
                             return True
                 return False
 

+ 83 - 16
core/construction_review/component/ai_review_engine.py

@@ -200,20 +200,38 @@ class AIReviewEngine(BaseReviewer):
         async def check_with_semaphore(check_func, **kwargs):
             async with self.semaphore:
                 return await check_func(**kwargs)
-            
+
+        # 外层超时配置(单个任务的整体超时时间,略大于内部单次超时15秒)
+        TASK_TIMEOUT = 20
+
         basic_tasks = []
 
         if 'sensitive_word_check'  in self.task_info.get_review_config_list():
             basic_tasks.append(
-                check_with_semaphore(self.check_grammar, trace_id_idx=trace_id_idx, review_content=review_content, review_references=None, review_location_label=review_location_label, state=state, stage_name=stage_name),
+                asyncio.create_task(
+                    asyncio.wait_for(
+                        check_with_semaphore(self.check_grammar, trace_id_idx=trace_id_idx, review_content=review_content, review_references=None, review_location_label=review_location_label, state=state, stage_name=stage_name),
+                        timeout=TASK_TIMEOUT
+                    )
+                )
             )
         if 'semantic_logic_check' in self.task_info.get_review_config_list():
             basic_tasks.append(
-                check_with_semaphore(self.check_semantic_logic, trace_id_idx=trace_id_idx, review_content=review_content, review_references=None, review_location_label=review_location_label, state=state, stage_name=stage_name),
+                asyncio.create_task(
+                    asyncio.wait_for(
+                        check_with_semaphore(self.check_semantic_logic, trace_id_idx=trace_id_idx, review_content=review_content, review_references=None, review_location_label=review_location_label, state=state, stage_name=stage_name),
+                        timeout=TASK_TIMEOUT
+                    )
+                )
             )
         if 'sensitive_check' in self.task_info.get_review_config_list():
             basic_tasks.append(
-                check_with_semaphore(self.check_sensitive, trace_id_idx=trace_id_idx, review_content=review_content, review_references=None, review_location_label=review_location_label, state=state, stage_name=stage_name),
+                asyncio.create_task(
+                    asyncio.wait_for(
+                        check_with_semaphore(self.check_sensitive, trace_id_idx=trace_id_idx, review_content=review_content, review_references=None, review_location_label=review_location_label, state=state, stage_name=stage_name),
+                        timeout=TASK_TIMEOUT
+                    )
+                )
             )
         if 'completeness_check' in self.task_info.get_review_config_list():
             basic_tasks.append(
@@ -228,8 +246,26 @@ class AIReviewEngine(BaseReviewer):
                 "sensitive_check": self._process_review_result(None),
             }
 
-        # 执行任务(只执行一次)
-        results = await asyncio.gather(*basic_tasks, return_exceptions=True)
+        # 使用 asyncio.wait 替代 gather,提供更好的超时控制
+        # 整体超时时间 = 单个任务超时 × 任务数 + 缓冲时间
+        total_timeout = TASK_TIMEOUT * len(basic_tasks) + 10
+
+        done, pending = await asyncio.wait(basic_tasks, timeout=total_timeout)
+
+        # 取消未完成的任务
+        for task in pending:
+            task.cancel()
+            logger.warning(f"[基础审查] 任务超时,已取消")
+
+        # 收集结果
+        results = []
+        for task in done:
+            try:
+                result = task.result()
+                results.append(result)
+            except Exception as e:
+                logger.error(f"[基础审查] 任务执行失败: {str(e)}")
+                results.append(e)
 
         # 根据配置项分配结果
         grammar_result = self._process_review_result(None)
@@ -296,6 +332,9 @@ class AIReviewEngine(BaseReviewer):
             async with self.semaphore:
                 return await check_func(**kwargs)
 
+        # 外层超时配置(单个任务的整体超时时间,略大于内部单次超时15秒)
+        TASK_TIMEOUT = 20
+
         # 根据配置动态创建技术性检查任务
         technical_tasks = []
         task_mapping = []  # 任务名称映射
@@ -303,19 +342,29 @@ class AIReviewEngine(BaseReviewer):
         if 'non_parameter_compliance_check' in self.task_info.get_review_config_list():
             task_mapping.append('non_parameter_compliance')
             technical_tasks.append(
-                check_with_semaphore(self.check_non_parameter_compliance, trace_id_idx=trace_id_idx,
-                                   review_content=review_content, review_references=review_references,
-                                   reference_source=reference_source, review_location_label=review_location_label,
-                                   state=state, stage_name=stage_name)
+                asyncio.create_task(
+                    asyncio.wait_for(
+                        check_with_semaphore(self.check_non_parameter_compliance, trace_id_idx=trace_id_idx,
+                                           review_content=review_content, review_references=review_references,
+                                           reference_source=reference_source, review_location_label=review_location_label,
+                                           state=state, stage_name=stage_name),
+                        timeout=TASK_TIMEOUT
+                    )
+                )
             )
 
         if 'parameter_compliance_check' in self.task_info.get_review_config_list():
             task_mapping.append('parameter_compliance')
             technical_tasks.append(
-                check_with_semaphore(self.check_parameter_compliance, trace_id_idx=trace_id_idx,
-                                   review_content=review_content, review_references=review_references,
-                                   reference_source=reference_source, review_location_label=review_location_label,
-                                   state=state, stage_name=stage_name)
+                asyncio.create_task(
+                    asyncio.wait_for(
+                        check_with_semaphore(self.check_parameter_compliance, trace_id_idx=trace_id_idx,
+                                           review_content=review_content, review_references=review_references,
+                                           reference_source=reference_source, review_location_label=review_location_label,
+                                           state=state, stage_name=stage_name),
+                        timeout=TASK_TIMEOUT
+                    )
+                )
             )
 
         # 一次性执行所有任务,避免重复协程调用
@@ -325,8 +374,26 @@ class AIReviewEngine(BaseReviewer):
                 "parameter_compliance": self._process_review_result(None),
             }
 
-        # 执行任务(只执行一次)
-        results = await asyncio.gather(*technical_tasks, return_exceptions=True)
+        # 使用 asyncio.wait 替代 gather,提供更好的超时控制
+        # 整体超时时间 = 单个任务超时 × 任务数 + 缓冲时间
+        total_timeout = TASK_TIMEOUT * len(technical_tasks) + 10
+
+        done, pending = await asyncio.wait(technical_tasks, timeout=total_timeout)
+
+        # 取消未完成的任务
+        for task in pending:
+            task.cancel()
+            logger.warning(f"[技术审查] 任务超时,已取消")
+
+        # 收集结果
+        results = []
+        for task in done:
+            try:
+                result = task.result()
+                results.append(result)
+            except Exception as e:
+                logger.error(f"[技术审查] 任务执行失败: {str(e)}")
+                results.append(e)
 
         # 根据配置项分配结果
         non_parameter_result = self._process_review_result(None)

+ 0 - 183
core/construction_review/component/reviewers/batch_status_resolver(1).py

@@ -1,183 +0,0 @@
-import json
-import re
-import time
-from typing import Any, Dict, List
-from concurrent.futures import ThreadPoolExecutor, as_completed
-
-import requests
-from foundation.database.base.vector.milvus_vector import MilvusVectorManager
-
-LLM_ENDPOINT = "http://192.168.91.253:9002/v1/chat/completions"
-LLM_MODEL = "Qwen3-8B"
-
-SYSTEM_PROMPT = """
-/no_think
-你是一个【规范性引用文件检索结果的风险点评审助手】。
-
-你会收到一个 JSON 对象,包含:
-- location:传进去检索的文本(原始依据文本)
-- candidates:该条依据对应的向量库候选列表(top3)
-
-你需要基于 location 与 candidates 的一致性/冲突点,输出一个结果对象(仅一个对象),字段为:
-1) issue_point:问题点
-2) suggestion:建议(可执行动作)
-3) reason:原因(必须引用 candidates 关键信息或说明检索不足)
-4) risk_level:风险水平,只能是 "LOW" / "MEDIUM" / "HIGH"
-   - 候选完全无关或名称和编码一致:LOW
-   - 部分冲突或有一定关联但不完全对应:MEDIUM
-   - 明显冲突:HIGH
-
-【输出限制】
-- 只能输出一个 JSON 对象(不是数组)
-- 对象只能包含四个字段:
-  {"issue_point":"", "suggestion":"", "reason":"", "risk_level":"LOW|MEDIUM|HIGH"}
-- 禁止输出解释文字、禁止输出代码块、禁止输出多余字段
-""".strip()
-
-
-BASIS_PATTERN = re.compile(r"《[^》]+》" r"(?:([^)]+))?", re.S)
-RISK_LEVELS = {"LOW", "MEDIUM", "HIGH"}
-JSON_OBJ_PATTERN = re.compile(r"\{[\s\S]*?\}", re.S)
-
-
-def call_llm(
-    messages: List[Dict[str, str]],
-    endpoint: str = LLM_ENDPOINT,
-    model: str = LLM_MODEL,
-    timeout: int = 120,
-) -> str:
-    payload = {"model": model, "messages": messages, "temperature": 0.3}
-    r = requests.post(endpoint, json=payload, timeout=timeout)
-    r.raise_for_status()
-    return r.json()["choices"][0]["message"]["content"]
-
-
-def extract_json_object(text: str) -> Dict[str, Any]:
-    if not text:
-        return {}
-    s = text.strip()
-
-    try:
-        obj = json.loads(s)
-        return obj if isinstance(obj, dict) else {}
-    except Exception:
-        pass
-
-    for chunk in reversed(JSON_OBJ_PATTERN.findall(s)):
-        try:
-            obj = json.loads(chunk)
-            if isinstance(obj, dict):
-                return obj
-        except Exception:
-            continue
-    return {}
-
-
-def build_messages(location: str, reference_source: List[str]) -> List[Dict[str, str]]:
-    # candidates 只给 reference_source 的内容(纯文本列表)
-    user_content = {"location": location, "candidates": reference_source}
-    return [
-        {"role": "system", "content": SYSTEM_PROMPT},
-        {"role": "user", "content": json.dumps(user_content, ensure_ascii=False)},
-    ]
-
-
-def extract_basis(text: str) -> List[str]:
-    return BASIS_PATTERN.findall(text or "")
-
-
-def batch_status_resolver(
-    text: str,
-    collection_name: str = "already_basis_test",
-    top_k_each: int = 3,
-    max_workers: int = 4,
-) -> List[Dict[str, Any]]:
-    items = extract_basis(text)
-    if not items:
-        return []
-
-    manager = MilvusVectorManager()
-
-    prepared: List[Dict[str, Any]] = []
-    for it in items:
-        it = (it or "").strip()
-        retrieved = manager.hybrid_search(
-            param={"collection_name": collection_name},
-            query_text=it,
-            top_k=top_k_each,
-            ranker_type="weighted",
-            dense_weight=0.2,
-            sparse_weight=0.8,
-        ) or []
-
-        # 统一:reference_source 是“当前 reference_source 内容”(纯文本)
-        reference_source = [
-            (c.get("text_content") or "").strip()
-            for c in retrieved
-            if (c.get("text_content") or "").strip()
-        ]
-
-        # review_references 与 reference_source 保持一致:都用纯文本列表(不再保留 id/similarity)
-        prepared.append({
-            "location": it,
-            "reference_source": reference_source,
-            "review_references": reference_source,  # 保持一致
-            "messages": build_messages(it, reference_source) if it else None,  # 给大模型也只给 reference_source
-        })
-
-    results: List[Dict[str, Any]] = [{
-        "issue_point": "",
-        "suggestion": "",
-        "reason": "",
-        "risk_level": "MEDIUM",
-        "location": p["location"],
-        "review_references": p["review_references"],
-        "reference_source": p["reference_source"],
-    } for p in prepared]
-
-    start = time.time()
-    with ThreadPoolExecutor(max_workers=max_workers) as ex:
-        fut2idx = {
-            ex.submit(call_llm, p["messages"]): i
-            for i, p in enumerate(prepared)
-            if p["messages"]
-        }
-
-        for fut in as_completed(fut2idx):
-            i = fut2idx[fut]
-            base = results[i]
-
-            try:
-                out = fut.result()
-            except Exception:
-                base["reason"] = (base["reason"] + ";" if base["reason"] else "") + "大模型调用失败/超时"
-                continue
-
-            obj = extract_json_object(out)
-            if not obj:
-                base["reason"] = (base["reason"] + ";" if base["reason"] else "") + "大模型输出未解析到JSON对象"
-                continue
-
-            base["issue_point"] = str(obj.get("issue_point", base["issue_point"]) or base["issue_point"]).strip()
-            base["suggestion"] = str(obj.get("suggestion", base["suggestion"]) or base["suggestion"]).strip()
-            base["reason"] = str(obj.get("reason", base["reason"]) or base["reason"]).strip()
-
-            rl = str(obj.get("risk_level", base["risk_level"]) or base["risk_level"]).strip()
-            base["risk_level"] = rl if rl in RISK_LEVELS else "MEDIUM"
-
-    print(f"调用LLM耗时: {time.time() - start:.4f} 秒")
-    return results
-
-
-if __name__ == "__main__":
-    text = """
-(1)《架桥机通用技术条件》(GB/T 26470-2025);
-(2)《起重机设计规范》(GB/T 3811-2008);
-(3)《起重机械安全规程 第 5 部分:桥式和门式起重机》(GB 6067.5-2014);
-(4)《电气装置安装工程 起重机电气装置施工及验收规范》(GB 50256-2019);
-(5)《起重设备安装工程施工及验收规范》(GB50278-2010);
-(6)《施工现场机械设备检查技术规范》(JGJ 160-2016);
-    """.strip()
-
-    res = batch_status_resolver(text, collection_name="already_basis_test", max_workers=4)
-    print(res)

+ 216 - 122
core/construction_review/component/reviewers/outline_reviewer.py

@@ -81,13 +81,19 @@ class OutlineReviewer:
             # 阶段1:一级大纲完整性审查(仅在有数据时执行)
             if overall_outline and overall_outline.strip():
                 logger.info("启动阶段1:一级大纲完整性审查...")
-                overall_task = self._overall_completeness_review(overall_outline, trace_id, state, stage_name)
+                # 创建Task对象
+                overall_task = asyncio.create_task(
+                    self._overall_completeness_review(overall_outline, trace_id, state, stage_name)
+                )
                 tasks.append(("overall", overall_task))
 
             # 阶段2:次级大纲逐项审查
             if detailed_outline:
                 logger.info("启动阶段2:次级大纲逐项审查...")
-                detailed_task = self._detailed_item_review(detailed_outline, trace_id, state, stage_name)
+                # 创建Task对象
+                detailed_task = asyncio.create_task(
+                    self._detailed_item_review(detailed_outline, trace_id, state, stage_name)
+                )
                 tasks.append(("detailed", detailed_task))
 
             # 处理空数据情况
@@ -101,48 +107,94 @@ class OutlineReviewer:
                 detailed_review_results = []
                 logger.warning("没有可执行的审查任务")
             else:
-                # 等待所有阶段完成
-                try:
-                    results = await asyncio.gather(*[task for _, task in tasks], return_exceptions=True)
-
-                    # 处理结果
-                    overall_review_result = None
-                    detailed_review_results = []
-
-                    for i, (stage_name, _) in enumerate(tasks):
-                        result = results[i]
-
-                        if stage_name == "overall":
-                            if isinstance(result, Exception):
-                                logger.error(f"阶段1执行异常: {str(result)}")
-                                overall_review_result = {
-                                    "success": False,
-                                    "error_message": f"阶段1异常: {str(result)}",
-                                    "overall_outline": overall_outline,
-                                    "parsed_result": None
-                                }
-                            else:
-                                overall_review_result = result
-                                logger.info(f"阶段1完成,成功: {overall_review_result.get('success', False)}")
-
-                        elif stage_name == "detailed":
-                            if isinstance(result, Exception):
-                                logger.error(f"阶段2执行异常: {str(result)}")
-                                detailed_review_results = []
-                            else:
-                                detailed_review_results = result
-                                logger.info(f"阶段2完成,审查项目数: {len(detailed_review_results)}")
-
-                    logger.info("两阶段并发审查全部完成")
+                # 等待所有阶段完成 - 使用 asyncio.wait 替代 gather
+                # 整体超时:取阶段1(60s)与阶段2(基于任务数动态计算)二者的最大值,再加缓冲
+                stage1_timeout = 60
+                stage2_timeout = (50 * len(detailed_outline) / 3) + 60 if detailed_outline else 0
+                total_timeout = max(stage1_timeout, stage2_timeout) + 30  # 并发执行,取最大值+缓冲
 
-                except Exception as e:
-                    logger.error(f"并发执行两阶段审查失败: {str(e)}")
-                    # 降级为串行处理
-                    logger.warning("降级为串行执行两阶段审查")
-                    if overall_outline and overall_outline.strip():
-                        overall_review_result = await self._overall_completeness_review(overall_outline, trace_id, state, stage_name)
-                    if detailed_outline:
-                        detailed_review_results = await self._detailed_item_review(detailed_outline, trace_id, state, stage_name)
+                logger.info(f"[大纲审查] 两阶段整体超时设置: {total_timeout:.0f}秒")
+
+                # 提取任务列表
+                task_list = [task for _, task in tasks]
+
+                done, pending = await asyncio.wait(
+                    task_list,
+                    timeout=total_timeout,
+                    return_when=asyncio.ALL_COMPLETED
+                )
+
+                # 取消未完成的任务
+                for task in pending:
+                    task.cancel()
+                    logger.warning(f"[大纲审查] 阶段任务超时,已取消")
+
+                # 构建任务到阶段名称的映射(关键修复:asyncio.wait返回的done是无序集合)
+                task_to_stage = {task: stage_name for stage_name, task in tasks}
+                
+                # 收集结果并按阶段名称分类
+                stage_results = {}
+                for task in done:
+                    stage_name_key = task_to_stage.get(task)
+                    try:
+                        result = task.result()
+                        logger.info(f"[大纲审查] {stage_name_key} task.result()返回, 类型: {type(result).__name__}")
+                        if isinstance(result, dict):
+                            logger.info(f"[大纲审查] result是字典, 包含键: {list(result.keys())}")
+                        elif isinstance(result, list):
+                            logger.info(f"[大纲审查] result是列表, 长度: {len(result)}")
+                        stage_results[stage_name_key] = result
+                    except asyncio.CancelledError:
+                        logger.error(f"[大纲审查] {stage_name_key} 阶段任务被取消")
+                        stage_results[stage_name_key] = Exception("Stage task cancelled")
+                    except Exception as e:
+                        logger.error(f"[大纲审查] {stage_name_key} 阶段任务执行失败: {str(e)}", exc_info=True)
+                        stage_results[stage_name_key] = e
+                
+                # 处理pending任务
+                for task in pending:
+                    stage_name_key = task_to_stage.get(task)
+                    stage_results[stage_name_key] = Exception("Task not completed (timeout)")
+
+                # 处理结果
+                overall_review_result = None
+                detailed_review_results = []
+
+                for stage_name_key, _ in tasks:
+                    result = stage_results.get(stage_name_key)
+                    logger.info(f"[大纲审查] 处理阶段: stage_name={stage_name_key}, result类型={type(result).__name__ if result else 'None'}")
+
+                    if stage_name_key == "overall":
+                        if isinstance(result, Exception):
+                            logger.error(f"阶段1执行异常: {str(result)}")
+                            overall_review_result = {
+                                "success": False,
+                                "error_message": f"阶段1异常: {str(result)}",
+                                "overall_outline": overall_outline,
+                                "parsed_result": None
+                            }
+                        elif isinstance(result, dict):
+                            overall_review_result = result
+                            logger.info(f"阶段1完成,成功: {overall_review_result.get('success', False)}")
+                        else:
+                            # 处理意外的返回类型(如列表)
+                            logger.error(f"阶段1返回了意外的类型: {type(result).__name__}")
+                            overall_review_result = {
+                                "success": False,
+                                "error_message": f"阶段1返回了意外的类型: {type(result).__name__}",
+                                "overall_outline": overall_outline,
+                                "parsed_result": None
+                            }
+
+                    elif stage_name_key == "detailed":
+                        if isinstance(result, Exception):
+                            logger.error(f"阶段2执行异常: {str(result)}")
+                            detailed_review_results = []
+                        else:
+                            detailed_review_results = result
+                            logger.info(f"阶段2完成,审查项目数: {len(detailed_review_results)}")
+
+                logger.info("两阶段并发审查全部完成")
 
             # 返回完整结果
             return {
@@ -179,78 +231,94 @@ class OutlineReviewer:
         Returns:
             一级大纲审查结果
         """
-        if not overall_outline or not overall_outline.strip():
-            logger.warning("一级大纲为空或仅包含空白字符")
-            return {
-                "success": False,
-                "error_message": "一级大纲为空,无法进行完整性审查",
-                "overall_outline": overall_outline,
-                "parsed_result": None
-            }
+        try:
+            if not overall_outline or not overall_outline.strip():
+                logger.warning("一级大纲为空或仅包含空白字符")
+                return {
+                    "success": False,
+                    "error_message": "一级大纲为空,无法进行完整性审查",
+                    "overall_outline": overall_outline,
+                    "parsed_result": None
+                }
 
-        logger.info("执行一级大纲完整性审查...")
+            logger.info("执行一级大纲完整性审查...")
 
-        # 构建提示词参数
-        prompt_kwargs = {}
-        prompt_kwargs["review_content"] = overall_outline
+            # 构建提示词参数
+            prompt_kwargs = {}
+            prompt_kwargs["review_content"] = overall_outline
 
-        # 获取一级大纲审查提示词模板
-        task_prompt = self.prompt_loader.get_prompt_template(
-            self.reviewer_type,
-            "overall_outline_completeness_review",
-            **prompt_kwargs
-        )
+            # 获取一级大纲审查提示词模板
+            task_prompt = self.prompt_loader.get_prompt_template(
+                self.reviewer_type,
+                "overall_outline_completeness_review",
+                **prompt_kwargs
+            )
 
-        task_prompt_info = {
-            "task_prompt": task_prompt,
-            "task_name": "一级大纲完整性审查"
-        }
+            task_prompt_info = {
+                "task_prompt": task_prompt,
+                "task_name": "一级大纲完整性审查"
+            }
 
-        # 调用模型进行审查
-        model_response = await self.model_client.get_model_generate_invoke(
-            trace_id=trace_id,
-            task_prompt_info=task_prompt_info
-        )
+            # 调用模型进行审查 - 大纲审查设置90秒超时
+            model_response = await self.model_client.get_model_generate_invoke(
+                trace_id=trace_id,
+                task_prompt_info=task_prompt_info,
+                timeout=90
+            )
 
-        response_text = model_response
-        # 直接提取JSON数据,避免关键词误判
-        json_data = self.inter_tool._extract_json_data(response_text)
-        overall_completeness_result = []
+            response_text = model_response
+            # 直接提取JSON数据,避免关键词误判
+            json_data = self.inter_tool._extract_json_data(response_text)
+            overall_completeness_result = []
+
+            if json_data and isinstance(json_data, list):
+                for item in json_data:
+                    overall_completeness_result.append(self.inter_tool._create_issue_item(item, "completeness_check"))
+            elif json_data and isinstance(json_data, dict):
+                overall_completeness_result.append(self.inter_tool._create_issue_item(json_data, "completeness_check"))
+            #filtered_issues = [r for r in overall_completeness_result if self._is_non_compliant_item(r)]
+            # 只统计exist_issue为true的项目数量
+            issue_count = sum(1 for item in overall_completeness_result if item.get('exist_issue', False))
+            message=f"一级大纲完整性审查完成,发现 {issue_count} 个问题",
+            if issue_count == 0:
+                message = "一级大纲完整性审查已通过,未发现缺失项"
 
-        if json_data and isinstance(json_data, list):
-            for item in json_data:
-                overall_completeness_result.append(self.inter_tool._create_issue_item(item, "completeness_check"))
-        elif json_data and isinstance(json_data, dict):
-            overall_completeness_result.append(self.inter_tool._create_issue_item(json_data, "completeness_check"))
-        #filtered_issues = [r for r in overall_completeness_result if self._is_non_compliant_item(r)]
-        # 只统计exist_issue为true的项目数量
-        issue_count = sum(1 for item in overall_completeness_result if item.get('exist_issue', False))
-        message=f"一级大纲完整性审查完成,发现 {issue_count} 个问题",
-        if issue_count == 0:
-            message = "一级大纲完整性审查已通过,未发现缺失项"
- 
-        if state and state.get("progress_manager"):
-            # 使用try-catch确保SSE推送失败不会影响主流程
-            try:
-                await state["progress_manager"].update_stage_progress(
-                    callback_task_id=state["callback_task_id"],
-                    stage_name=f"{stage_name} - 阶段1:一级大纲完整性审查",
-                    current=None,  # 明确不更新current,保持主流程进度
-                    status="processing",
-                    message=message,
-                    issues=overall_completeness_result,
-                    event_type="processing"  # 使用专门的事件类型
-                )
-                logger.info("SSE推送成功: 一级大纲完整性审查完成")
-            except Exception as e:
-                logger.error(f"SSE推送失败: 一级大纲完整性审查, 错误: {str(e)}")
-                # 不抛出异常,避免影响主流程
+            if state and state.get("progress_manager"):
+                # 使用try-catch确保SSE推送失败不会影响主流程
+                try:
+                    # 【修复】明确传递issues的副本,避免变量混淆
+                    issues_copy = list(overall_completeness_result) if overall_completeness_result else []
+                    await state["progress_manager"].update_stage_progress(
+                        callback_task_id=state["callback_task_id"],
+                        stage_name=f"{stage_name} - 阶段1:一级大纲完整性审查",
+                        current=None,  # 明确不更新current,保持主流程进度
+                        status="processing",
+                        message=message,
+                        issues=issues_copy,
+                        event_type="processing"  # 使用专门的事件类型
+                    )
+                    logger.info("SSE推送成功: 一级大纲完整性审查完成")
+                except Exception as e:
+                    logger.error(f"SSE推送失败: 一级大纲完整性审查, 错误: {str(e)}")
+                    # 不抛出异常,避免影响主流程
 
-        return {
-            "success": True,
-            "overall_outline": overall_outline,
-            "parsed_result": overall_completeness_result
-        }
+            # 【调试】明确返回字典,使用新的变量名避免混淆
+            final_result = {
+                "success": True,
+                "overall_outline": overall_outline,
+                "parsed_result": overall_completeness_result
+            }
+            logger.info(f"[大纲审查-阶段1] 准备返回final_result, 类型: {type(final_result).__name__}, 包含键: {list(final_result.keys())}")
+            return final_result
+
+        except Exception as e:
+            logger.error(f"一级大纲完整性审查异常: {str(e)}", exc_info=True)
+            return {
+                "success": False,
+                "error_message": f"一级大纲完整性审查异常: {str(e)}",
+                "overall_outline": overall_outline,
+                "parsed_result": None
+            }
 
     async def _detailed_item_review(self, detailed_outline: list, trace_id: str,state, stage_name) -> list:
         """
@@ -276,22 +344,46 @@ class OutlineReviewer:
 
         logger.info(f"开始次级大纲并发审查,有效项目数量: {len(valid_items)}")
 
-        # 创建并发审查任务
-        semaphore = asyncio.Semaphore(20)  # 限制并发数为5,避免过载
+        # 创建并发审查任务 - 降低并发数避免模型服务过载
+        semaphore = asyncio.Semaphore(3)  # 限制并发数为3,避免过载
+
         tasks = []
 
         for i, outline_item in valid_items:
-            task = self._concurrent_single_review(i, outline_item, trace_id, semaphore, state, stage_name)
+            # 只用信号量控制并发,不添加外层wait_for(避免双重超时控制)
+            task = asyncio.create_task(
+                self._concurrent_single_review(i, outline_item, trace_id, semaphore, state, stage_name)
+            )
             tasks.append(task)
 
-        # 等待所有任务完成
-        try:
-            results = await asyncio.gather(*tasks, return_exceptions=True)
-            logger.info(f"并发审查完成,总任务数: {len(tasks)}")
-        except Exception as e:
-            logger.error(f"并发审查失败: {str(e)}")
-            # 如果并发失败,降级为串行处理
-            return await self._fallback_sequential_review(valid_items, trace_id,state, stage_name)
+        # 使用 asyncio.wait 提供超时控制
+        # 整体超时:每个任务预计最多48秒(15×3+0.5+1+2),乘以任务数的1/3(并发数为3)
+        estimated_time_per_task = 50  # 秒
+        total_timeout = (estimated_time_per_task * len(tasks) / 3) + 60  # 加60秒缓冲
+
+        logger.info(f"[大纲审查] 设置整体超时: {total_timeout:.0f}秒,任务数: {len(tasks)}")
+
+        done, pending = await asyncio.wait(tasks, timeout=total_timeout)
+
+        # 取消未完成的任务
+        for task in pending:
+            task.cancel()
+            logger.warning(f"[大纲审查] 任务超时,已取消")
+
+        # 收集结果
+        results = []
+        for task in done:
+            try:
+                result = task.result()
+                results.append(result)
+            except asyncio.CancelledError:
+                logger.error(f"[大纲审查] 任务被取消")
+                results.append(Exception("Task cancelled"))
+            except Exception as e:
+                logger.error(f"[大纲审查] 任务执行失败: {str(e)}", exc_info=True)
+                results.append(e)
+
+        logger.info(f"并发审查完成,总任务数: {len(tasks)}, 成功: {len(done)}, 超时: {len(pending)}")
 
         # 处理结果
         detailed_review_results = []
@@ -431,10 +523,11 @@ class OutlineReviewer:
             "task_name": f"单项大纲完整性审查-{category}"
         }
 
-        # 调用模型进行审查
+        # 调用模型进行审查 - 大纲审查设置90秒超时
         model_response = await self.model_client.get_model_generate_invoke(
             trace_id=f"{trace_id}_item_{item_index}",
-            task_prompt_info=task_prompt_info
+            task_prompt_info=task_prompt_info,
+            timeout=90
         )
 
         response_text = model_response
@@ -513,10 +606,11 @@ class OutlineReviewer:
                 "task_name": "章节目录分类器"
             }
 
-            # 调用模型
+            # 调用模型 - 大纲审查设置90秒超时
             model_response = await self.model_client.get_model_generate_invoke(
                 trace_id=trace_id,
-                task_prompt_info=task_prompt_info
+                task_prompt_info=task_prompt_info,
+                timeout=90
             )
 
             # 提取分类结果

+ 3 - 2
core/construction_review/component/reviewers/prep_basis_reviewer.py

@@ -333,10 +333,11 @@ class LLMReviewClient:
                 "task_name": "规范性引用文件识别与状态判断"
             }
 
-            # 调用统一模型客户端
+            # 调用统一模型客户端 - 编制依据审查设置90秒超时
             response = await generate_model_client.get_model_generate_invoke(
                 trace_id=trace_id,
-                task_prompt_info=task_prompt_info
+                task_prompt_info=task_prompt_info,
+                timeout=90
             )
             return response
 

+ 3 - 2
core/construction_review/component/reviewers/reference_basis_reviewer.py

@@ -125,10 +125,11 @@ class LLMReviewClient:
             }
             logger.info(f" 模型调用准备阶段: {task_prompt_info}")
 
-            # 调用统一模型客户端
+            # 调用统一模型客户端 - 编制依据审查设置90秒超时
             response = await generate_model_client.get_model_generate_invoke(
                 trace_id=trace_id,
-                task_prompt_info=task_prompt_info
+                task_prompt_info=task_prompt_info,
+                timeout=90
             )
             return response
 

+ 3 - 2
core/construction_review/component/reviewers/timeliness_basis_reviewer.py

@@ -209,10 +209,11 @@ class LLMReviewClient:
             }
             logger.info(f" 模型调用准备阶段: {task_prompt_info}")
 
-            # 调用统一模型客户端
+            # 调用统一模型客户端 - 编制依据审查设置90秒超时
             response = await generate_model_client.get_model_generate_invoke(
                 trace_id=trace_id,
-                task_prompt_info=task_prompt_info
+                task_prompt_info=task_prompt_info,
+                timeout=90
             )
             return response
 

+ 36 - 10
core/construction_review/workflows/ai_review_workflow.py

@@ -61,7 +61,7 @@ DEFAULT_SLICE_START_INDEX = 30
 MAX_PROGRESS_PERCENTAGE = 100
 RISK_LEVELS = {"high": "高风险", "medium": "中风险", "low": "低风险"}
 DEFAULT_RISK_LEVEL = "medium"
-REVIEW_TIMEOUT = 300  # 单个审查单元超时时间(秒)
+REVIEW_TIMEOUT = 60  # 单个审查任务超时时间(秒),包括基础审查和技术审查
 WORKFLOW_TIMEOUT = 1800  # 整个工作流超时时间(秒,30分钟)
 
 
@@ -893,18 +893,44 @@ class AIReviewCoreFun:
             stage_name = f"AI审查:{section_label}"
             #logger.info(f"test_review_location_label:{trace_id_idx}: {review_location_label}")
             review_tasks = [
-                asyncio.wait_for(
-                    self.ai_review_engine.basic_compliance_check(trace_id_idx, unit_content, review_location_label,state,stage_name),
-                    timeout=REVIEW_TIMEOUT
+                asyncio.create_task(
+                    asyncio.wait_for(
+                        self.ai_review_engine.basic_compliance_check(trace_id_idx, unit_content, review_location_label,state,stage_name),
+                        timeout=REVIEW_TIMEOUT
+                    )
                 ),
-                asyncio.wait_for(
-                    self.ai_review_engine.technical_compliance_check(trace_id_idx, unit_content,review_location_label,state,stage_name),
-                    timeout=REVIEW_TIMEOUT
+                asyncio.create_task(
+                    asyncio.wait_for(
+                        self.ai_review_engine.technical_compliance_check(trace_id_idx, unit_content,review_location_label,state,stage_name),
+                        timeout=REVIEW_TIMEOUT
+                    )
                 ),
             ]
- 
-            # 等待所有审查完成
-            review_results = await asyncio.gather(*review_tasks, return_exceptions=True)
+
+            # 使用 asyncio.wait 替代 gather,提供更好的超时控制
+            # 整体超时 = 两个任务的超时之和 + 缓冲时间
+            total_timeout = REVIEW_TIMEOUT * len(review_tasks) + 10
+
+            done, pending = await asyncio.wait(review_tasks, timeout=total_timeout)
+
+            # 取消未完成的任务
+            for task in pending:
+                task.cancel()
+                logger.warning(f"[工作流] 审查任务超时,已取消: trace_id={trace_id_idx}")
+
+            # 收集结果(NOTE(review): done 是无序集合,不保证基础审查结果在前;下方按 review_results[0]/[1] 取基础/技术结果可能错位,应改为按 review_tasks 列表顺序逐个读取 task.result())
+            review_results = []
+            for task in done:
+                try:
+                    result = task.result()
+                    review_results.append(result)
+                except Exception as e:
+                    logger.error(f"[工作流] 审查任务执行失败: {str(e)}, trace_id={trace_id_idx}")
+                    review_results.append(e)
+
+            # 确保有两个结果(基础审查和技术审查)
+            while len(review_results) < 2:
+                review_results.append(Exception("Task not executed"))
             # 处理异常结果
             basic_result = review_results[0] if not isinstance(review_results[0], Exception) else {"error": str(review_results[0])}
             technical_result = review_results[1] if len(review_results) > 1 and not isinstance(review_results[1], Exception) else {"error": str(review_results[1]) if len(review_results) > 1 else "No result"}

+ 20 - 9
foundation/ai/agent/generate/model_generate.py

@@ -31,13 +31,27 @@ class GenerateModelClient:
         self.max_retries = max_retries
         self.backoff_factor = backoff_factor
 
-    async def _retry_with_backoff(self, func: Callable, *args, **kwargs):
+    async def _retry_with_backoff(self, func: Callable, *args, timeout: Optional[int] = None, **kwargs):
         """
-        带指数退避的重试机制
+        带指数退避的重试机制,每次重试都有独立的超时控制
         """
+        current_timeout = timeout or self.default_timeout
+
         for attempt in range(self.max_retries + 1):
             try:
-                return await func(*args, **kwargs)
+                # 每次重试都有独立的超时时间
+                return await asyncio.wait_for(
+                    func(*args, **kwargs),
+                    timeout=current_timeout
+                )
+            except asyncio.TimeoutError:
+                if attempt == self.max_retries:
+                    logger.error(f"[模型调用] 达到最大重试次数 {self.max_retries},最终超时")
+                    raise TimeoutError(f"模型调用在 {self.max_retries} 次重试后均超时")
+
+                wait_time = self.backoff_factor * (2 ** attempt)
+                logger.warning(f"[模型调用] 第 {attempt + 1} 次超时, {wait_time}秒后重试...")
+                await asyncio.sleep(wait_time)
             except Exception as e:
                 if attempt == self.max_retries:
                     logger.error(f"[模型调用] 达到最大重试次数 {self.max_retries},最终失败: {str(e)}")
@@ -64,11 +78,8 @@ class GenerateModelClient:
                 loop = asyncio.get_event_loop()
                 return await loop.run_in_executor(None, self.llm.invoke, messages)
 
-            # 使用超时包装调用
-            response = await asyncio.wait_for(
-                self._retry_with_backoff(_invoke_model),
-                timeout=current_timeout
-            )
+            # 调用带重试机制的方法,超时控制在重试机制内部处理
+            response = await self._retry_with_backoff(_invoke_model, timeout=current_timeout)
 
             elapsed_time = time.time() - start_time
             logger.info(f"[模型调用] 成功 trace_id: {trace_id}, 耗时: {elapsed_time:.2f}s")
@@ -115,4 +126,4 @@ class GenerateModelClient:
             logger.error(f"[模型流式调用] 异常 trace_id: {trace_id}, 耗时: {elapsed_time:.2f}s, 错误类型: {type(e).__name__}, 错误信息: {str(e)}")
             raise
 
-generate_model_client = GenerateModelClient(default_timeout=30, max_retries=3, backoff_factor=1.0)
+generate_model_client = GenerateModelClient(default_timeout=15, max_retries=2, backoff_factor=0.5)

Diferenças do arquivo suprimidas por serem muito extensas
+ 0 - 5
logs/agent_debug.log.1


Diferenças do arquivo suprimidas por serem muito extensas
+ 0 - 4544
logs/agent_debug.log.2


Diferenças do arquivo suprimidas por serem muito extensas
+ 0 - 1424
logs/agent_debug.log.3


Diferenças do arquivo suprimidas por serem muito extensas
+ 0 - 157
logs/agent_debug.log.4


Diferenças do arquivo suprimidas por serem muito extensas
+ 0 - 0
logs/agent_debug.log.5


Diferenças do arquivo suprimidas por serem muito extensas
+ 0 - 5
logs/agent_info.log.1


Diferenças do arquivo suprimidas por serem muito extensas
+ 0 - 4544
logs/agent_info.log.2


Diferenças do arquivo suprimidas por serem muito extensas
+ 0 - 1424
logs/agent_info.log.3


Diferenças do arquivo suprimidas por serem muito extensas
+ 0 - 157
logs/agent_info.log.4


Diferenças do arquivo suprimidas por serem muito extensas
+ 0 - 0
logs/agent_info.log.5


+ 250 - 0
utils_test/Sync_Funcation_Test/test_异步方法阻塞问题测试.py

@@ -0,0 +1,250 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+'''
+@Project    : lq-agent-api
+@File       :test_async_blocking_demo.py
+@IDE        :PyCharm
+@Author     :
+@Date       :2025/12/26
+'''
+
+import asyncio
+import time
+from typing import List, Tuple
+
+# ==================== 模拟任务函数 ====================
+
+async def normal_task(name: str, delay: float) -> str:
+    """
+    正常任务,会在指定延迟后完成
+    """
+    print(f"[{time.strftime('%H:%M:%S')}] [成功] {name} 开始执行(预计{delay}秒后完成)")
+    await asyncio.sleep(delay)
+    print(f"[{time.strftime('%H:%M:%S')}] [成功] {name} 完成")
+    return f"{name}的结果"
+
+async def hanging_task(name: str) -> str:
+    """
+    会卡住的任务,模拟无响应的情况
+    """
+    print(f"[{time.strftime('%H:%M:%S')}] [卡住] {name} 开始执行(会卡住)")
+    await asyncio.sleep(100)  # 模拟卡住
+    return f"{name}的结果"
+
+async def timeout_task(name: str, delay: float) -> str:
+    """
+    会超时的任务
+    """
+    print(f"[{time.strftime('%H:%M:%S')}] [超时] {name} 开始执行(预计{delay}秒,超过2秒会超时)")
+    await asyncio.sleep(delay)
+    return f"{name}的结果"
+
+# ==================== 测试场景 ====================
+
+async def test_scenario_1_gather_without_protection():
+    """
+    场景1: 使用 gather 但没有超时保护 - 会导致永久阻塞
+    """
+    print("\n" + "="*80)
+    print("场景1: gather + 无超时保护 (会永久阻塞)")
+    print("="*80)
+
+    start = time.time()
+
+    try:
+        # 添加整体超时保护,否则会永久卡住
+        results = await asyncio.wait_for(
+            asyncio.gather(
+                normal_task("任务1", 1),
+                normal_task("任务2", 0.5),
+                hanging_task("任务3"),  # 这个会卡住
+            ),
+            timeout=5  # 5秒后强制超时
+        )
+    except asyncio.TimeoutError:
+        print(f"[{time.strftime('%H:%M:%S')}] [警告] 整体超时!被任务3阻塞了")
+
+    elapsed = time.time() - start
+    print(f"\n[统计] 总耗时: {elapsed:.1f}秒")
+    print("[问题] 任务1和任务2早就完成了,但要等任务3超时才结束")
+
+
+async def test_scenario_2_gather_with_wait_for():
+    """
+    场景2: gather + wait_for (每个任务有独立超时)
+    """
+    print("\n" + "="*80)
+    print("场景2: gather + wait_for (每个任务独立超时)")
+    print("="*80)
+
+    start = time.time()
+
+    try:
+        results = await asyncio.gather(
+            asyncio.wait_for(normal_task("任务1", 1), timeout=3),
+            asyncio.wait_for(timeout_task("任务2", 5), timeout=2),  # 会超时
+            asyncio.wait_for(normal_task("任务3", 0.5), timeout=3),
+        )
+        print(f"\n[统计] 所有任务结果: {results}")
+    except asyncio.TimeoutError as e:
+        print(f"[{time.strftime('%H:%M:%S')}] [警告] 某个任务超时: {e}")
+        print("[问题] 任务2超时导致整个gather失败,任务1和任务3的结果也丢失了")
+
+    elapsed = time.time() - start
+    print(f"[统计] 总耗时: {elapsed:.1f}秒")
+
+
+async def test_scenario_3_gather_with_return_exceptions():
+    """
+    场景3: gather + return_exceptions=True (推荐方案1)
+    """
+    print("\n" + "="*80)
+    print("场景3: gather + wait_for + return_exceptions=True [推荐]")
+    print("="*80)
+
+    start = time.time()
+
+    results = await asyncio.gather(
+        asyncio.wait_for(normal_task("任务1", 1), timeout=3),
+        asyncio.wait_for(timeout_task("任务2", 5), timeout=2),  # 会超时
+        asyncio.wait_for(normal_task("任务3", 0.5), timeout=3),
+        return_exceptions=True  # 关键:超时返回异常而不是抛出
+    )
+
+    print(f"\n[统计] 所有任务结果:")
+    for i, result in enumerate(results, 1):
+        if isinstance(result, Exception):
+            print(f"  任务{i}: [失败] {type(result).__name__}")
+        else:
+            print(f"  任务{i}: [成功] {result}")
+
+    elapsed = time.time() - start
+    print(f"\n[统计] 总耗时: {elapsed:.1f}秒")
+    print("[优点] 任务1和任务3成功,任务2超时但不影响其他任务")
+
+
+async def test_scenario_4_wait_with_timeout():
+    """
+    场景4: asyncio.wait + 超时控制 (推荐方案2)
+    """
+    print("\n" + "="*80)
+    print("场景4: asyncio.wait + 整体超时 + 自动取消未完成任务 [推荐]")
+    print("="*80)
+
+    start = time.time()
+
+    # 创建任务
+    tasks = [
+        asyncio.create_task(asyncio.wait_for(normal_task("任务1", 1), timeout=3)),
+        asyncio.create_task(asyncio.wait_for(timeout_task("任务2", 5), timeout=2)),
+        asyncio.create_task(asyncio.wait_for(normal_task("任务3", 0.5), timeout=3)),
+        asyncio.create_task(hanging_task("任务4")),  # 会卡住
+    ]
+
+    # 等待任务完成(整体超时)
+    done, pending = await asyncio.wait(tasks, timeout=5)
+
+    print(f"\n[{time.strftime('%H:%M:%S')}] [统计] 完成状态: 已完成={len(done)}, 未完成={len(pending)}")
+
+    # 取消未完成的任务
+    for task in pending:
+        task.cancel()
+        print(f"[{time.strftime('%H:%M:%S')}] [取消] 取消未完成任务")
+
+    # 收集结果
+    print(f"\n[统计] 任务执行结果:")
+    for i, task in enumerate(tasks, 1):
+        try:
+            if task.done():
+                result = task.result()
+                print(f"  任务{i}: [成功] {result}")
+            else:
+                print(f"  任务{i}: [未完成] (已取消)")
+        except Exception as e:
+            print(f"  任务{i}: [失败] {type(e).__name__}")
+
+    elapsed = time.time() - start
+    print(f"\n[统计] 总耗时: {elapsed:.1f}秒")
+    print("[优点] 可以精确控制哪些任务完成,哪些未完成,并自动取消未完成任务")
+
+
+async def test_scenario_5_real_world_simulation():
+    """
+    场景5: 模拟真实场景 - 多个审查任务并发执行
+    """
+    print("\n" + "="*80)
+    print("场景5: 真实场景模拟 - 并发审查多个文档")
+    print("="*80)
+
+    # 模拟审查任务
+    async def review_document(doc_id: str, processing_time: float) -> dict:
+        print(f"[{time.strftime('%H:%M:%S')}] [文档] 开始审查文档 {doc_id}")
+        await asyncio.sleep(processing_time)
+        return {"doc_id": doc_id, "status": "通过", "issues": []}
+
+    # 创建多个审查任务
+    review_tasks = {
+        "doc_001": asyncio.create_task(review_document("doc_001", 0.8)),
+        "doc_002": asyncio.create_task(review_document("doc_002", 1.2)),
+        "doc_003": asyncio.create_task(review_document("doc_003", 5.0)),  # 会超时
+        "doc_004": asyncio.create_task(review_document("doc_004", 0.5)),
+    }
+
+    print(f"\n[{time.strftime('%H:%M:%S')}] [开始] 并发启动 {len(review_tasks)} 个审查任务")
+
+    start = time.time()
+
+    # 方案A: 使用 gather + return_exceptions (推荐)
+    print("\n--- 使用 gather + return_exceptions ---")
+    results = await asyncio.gather(
+        *[asyncio.wait_for(task, timeout=2) for task in review_tasks.values()],
+        return_exceptions=True
+    )
+
+    # 统计结果
+    success_count = sum(1 for r in results if not isinstance(r, Exception))
+    timeout_count = sum(1 for r in results if isinstance(r, (asyncio.TimeoutError, TimeoutError)))
+
+    print(f"\n[统计] 审查结果统计:")
+    print(f"  [成功] {success_count} 个文档")
+    print(f"  [超时] {timeout_count} 个文档")
+    print(f"  [成功率] {success_count/len(results)*100:.1f}%")
+
+    elapsed = time.time() - start
+    print(f"\n[统计] 总耗时: {elapsed:.1f}秒")
+    print("[说明] 虽然doc_003超时,但其他文档都成功审查,整体流程未受阻")
+
+
+# ==================== 主函数 ====================
+
+async def main():
+    """
+    运行所有测试场景
+    """
+    print("="*80)
+    print("异步方法阻塞问题演示")
+    print("="*80)
+    print("\n本演示将展示不同的异步并发模式及其对阻塞问题的处理方式")
+    print("请观察每种场景的耗时和任务完成情况\n")
+
+    # 运行所有测试
+    await test_scenario_1_gather_without_protection()
+    await test_scenario_2_gather_with_wait_for()
+    await test_scenario_3_gather_with_return_exceptions()
+    await test_scenario_4_wait_with_timeout()
+    await test_scenario_5_real_world_simulation()
+
+    print("\n" + "="*80)
+    print("演示完成!")
+    print("="*80)
+    print("\n[总结] 建议:")
+    print("1. [避免] gather + 无超时保护 → 会永久阻塞")
+    print("2. [谨慎] gather + wait_for → 单个超时会丢失所有结果")
+    print("3. [推荐] gather + wait_for + return_exceptions=True")
+    print("4. [推荐] asyncio.wait + 超时 + 手动取消未完成任务")
+    print("5. [提示] 根据场景选择: 需要所有结果用方案3,需要精细控制用方案4")
+
+
+if __name__ == "__main__":
+    # 运行演示
+    asyncio.run(main())

Alguns arquivos não foram mostrados porque muitos arquivos mudaram nesse diff