Ver Fonte

Merge branch 'dev_sgsc_xth' of CRBC-MaaS-Platform-Project/LQAgentPlatform into dev

feat(sgsc-文档切分模块-xth): 新增OCR引擎切换与性能优化功能
LingMin há 2 semanas atrás
pai
commit
80d88c93ba

+ 215 - 0
config/config.ini

@@ -0,0 +1,215 @@
+
+# SECURITY NOTE (review): this file commits plaintext API keys and database
+# passwords (deepseek, doubao, dashscope, siliconflow, redis, mysql, pgvector).
+# Rotate the exposed credentials and load secrets from environment variables
+# or a secret store instead of committing them to the repository.
+
+[model]
+MODEL_TYPE=qwen3_5_35b_a3b
+
+# Embedding模型类型选择: lq_qwen3_8b_emd, siliconflow_embed
+EMBEDDING_MODEL_TYPE=lq_qwen3_8b_emd
+
+# Rerank模型类型选择: bge_rerank_model, lq_rerank_model, silicoflow_rerank_model
+RERANK_MODEL_TYPE=lq_rerank_model
+
+# 完整性审查模型类型 (用于 llm_content_classifier_v2)
+COMPLETENESS_REVIEW_MODEL_TYPE=qwen3_5_122b_a10b
+
+
+[deepseek]
+DEEPSEEK_SERVER_URL=https://api.deepseek.com
+DEEPSEEK_MODEL_ID=deepseek-chat
+DEEPSEEK_API_KEY=sk-9fe722389bac47e9ab30cf45b32eb736
+
+[doubao]
+DOUBAO_SERVER_URL=https://ark.cn-beijing.volces.com/api/v3/
+DOUBAO_MODEL_ID=doubao-seed-1-6-flash-250715
+DOUBAO_API_KEY=c98686df-506f-432c-98de-32e571a8e916
+
+
+[qwen]
+QWEN_SERVER_URL=http://192.168.91.253:8003/v1/
+QWEN_MODEL_ID=qwen3-30b
+QWEN_API_KEY=sk-123456
+
+# Qwen3-30B 独立配置(与qwen配置相同,方便后续独立管理)
+[qwen3_30b]
+QWEN3_30B_SERVER_URL=http://192.168.91.253:8003/v1/
+QWEN3_30B_MODEL_ID=qwen3-30b
+QWEN3_30B_API_KEY=sk-123456
+
+
+[ai_review]
+# 调试模式配置
+MAX_REVIEW_UNITS=5
+REVIEW_MODE=all
+# Allowed values for REVIEW_MODE: all, random, first
+
+
+[app]
+APP_CODE=lq-agent
+APP_SECRET=sx-73d32556-605e-11f0-9dd8-acde48001122
+
+
+[launch]
+HOST = 0.0.0.0
+LAUNCH_PORT = 8002
+
+[redis]
+REDIS_URL=redis://:123456@127.0.0.1:6379
+REDIS_HOST=127.0.0.1
+REDIS_PORT=6379
+REDIS_DB=0
+REDIS_PASSWORD=123456
+REDIS_MAX_CONNECTIONS=50
+
+[ocr]
+# OCR 引擎选择(以下写法都支持):
+# GLM-OCR: glm_ocr | glm-ocr | glmocr
+# MinerU:  mineru | mineru-ocr | mineru_ocr
+# 默认: glm_ocr
+ENGINE=glm-ocr
+
+# GLM-OCR 配置
+GLM_OCR_API_URL=http://183.220.37.46:25429/v1/chat/completions
+GLM_OCR_TIMEOUT=600
+
+# MinerU 配置
+MINERU_API_URL=http://183.220.37.46:25428/file_parse
+MINERU_TIMEOUT=300
+
+[log]
+LOG_FILE_PATH=logs
+LOG_FILE_MAX_MB=10
+LOG_BACKUP_COUNT=5
+CONSOLE_OUTPUT=True
+
+[user_lists]
+USERS=['user-001']
+
+
+[siliconflow]
+SLCF_MODEL_SERVER_URL=https://api.siliconflow.cn/v1
+SLCF_API_KEY=sk-rdabeukkgfwyelstbqlcupsrwfkmduqvadztvxeyumvllstt
+SLCF_CHAT_MODEL_ID=test-model
+SLCF_EMBED_MODEL_ID=netease-youdao/bce-embedding-base_v1
+SLCF_REANKER_MODEL_ID=BAAI/bge-reranker-v2-m3
+SLCF_VL_CHAT_MODEL_ID=THUDM/GLM-4.1V-9B-Thinking
+
+[siliconflow_embed]
+# 硅基流动 Embedding 模型配置
+SLCF_EMBED_SERVER_URL=https://api.siliconflow.cn/v1
+SLCF_EMBED_API_KEY=sk-rdabeukkgfwyelstbqlcupsrwfkmduqvadztvxeyumvllstt
+SLCF_EMBED_MODEL_ID=Qwen/Qwen3-Embedding-8B
+SLCF_EMBED_DIMENSIONS=4096
+
+[lq_qwen3_8b]
+QWEN_LOCAL_1_5B_SERVER_URL=http://192.168.91.253:9002/v1
+QWEN_LOCAL_1_5B_MODEL_ID=Qwen3-8B
+QWEN_LOCAL_1_5B_API_KEY=dummy
+
+# 本地部署的Qwen3-Embedding-8B配置
+[lq_qwen3_8b_emd]
+LQ_EMBEDDING_SERVER_URL=http://192.168.91.253:9003/v1
+LQ_EMBEDDING_MODEL_ID=Qwen3-Embedding-8B
+LQ_EMBEDDING_API_KEY=dummy
+
+[lq_qwen3_4b]
+QWEN_LOCAL_1_5B_SERVER_URL=http://192.168.91.253:9001/v1
+QWEN_LOCAL_1_5B_MODEL_ID=Qwen3-4B
+QWEN_LOCAL_1_5B_API_KEY=dummy
+
+# 本地部署的Qwen3-Reranker-8B配置
+[lq_rerank_model]
+LQ_RERANKER_SERVER_URL=http://192.168.91.253:9004/v1/rerank
+LQ_RERANKER_MODEL=Qwen3-Reranker-8B
+LQ_RERANKER_API_KEY=dummy
+LQ_RERANKER_TOP_N=10
+
+# 硅基流动API的Qwen3-Reranker-8B配置
+[silicoflow_rerank_model]
+SILICOFLOW_RERANKER_API_URL=https://api.siliconflow.cn/v1/rerank
+SILICOFLOW_RERANKER_API_KEY=sk-rdabeukkgfwyelstbqlcupsrwfkmduqvadztvxeyumvllstt
+SILICOFLOW_RERANKER_MODEL=Qwen/Qwen3-Reranker-8B
+
+# BGE Reranker配置
+[bge_rerank_model]
+BGE_RERANKER_SERVER_URL=http://192.168.91.253:9004/rerank
+BGE_RERANKER_MODEL=BAAI/bge-reranker-v2-m3
+BGE_RERANKER_API_KEY=dummy
+BGE_RERANKER_TOP_N=10
+
+[lq_qwen3_8B_lora]
+LQ_QWEN3_8B_LQ_LORA_SERVER_URL=http://192.168.91.253:9006/v1
+LQ_QWEN3_8B_LQ_LORA_MODEL_ID=Qwen3-8B-lq-lora
+LQ_QWEN3_8B_LQ_LORA_API_KEY=dummy
+
+
+
+[mysql]
+MYSQL_HOST=192.168.92.61
+MYSQL_PORT=13306
+MYSQL_USER=root
+MYSQL_PASSWORD=lq@123
+MYSQL_DB=lq_db
+MYSQL_MIN_SIZE=1
+MYSQL_MAX_SIZE=5
+MYSQL_AUTO_COMMIT=True
+
+
+[pgvector]
+PGVECTOR_HOST=124.223.140.149
+PGVECTOR_PORT=7432
+PGVECTOR_DB=vector_db
+PGVECTOR_USER=vector_user
+PGVECTOR_PASSWORD=pg16@123
+
+
+[milvus]
+MILVUS_HOST=192.168.92.96
+MILVUS_PORT=30129
+MILVUS_DB=lq_db
+MILVUS_COLLECTION=first_bfp_collection_test
+MILVUS_USER=
+MILVUS_PASSWORD=
+
+
+[hybrid_search]
+# 混合检索权重配置
+DENSE_WEIGHT=0.3
+SPARSE_WEIGHT=0.7
+
+
+# ============================================================
+# DashScope Qwen3.5 系列模型配置
+# ============================================================
+
+# DashScope Qwen3.5-35B-A3B 模型
+[qwen3_5_35b_a3b]
+DASHSCOPE_SERVER_URL=https://dashscope.aliyuncs.com/compatible-mode/v1
+DASHSCOPE_MODEL_ID=qwen3.5-35b-a3b
+DASHSCOPE_API_KEY=sk-98cca096416a41d5a6cec68b824486c5
+
+# DashScope Qwen3.5-27B 模型
+[qwen3_5_27b]
+DASHSCOPE_SERVER_URL=https://dashscope.aliyuncs.com/compatible-mode/v1
+DASHSCOPE_MODEL_ID=qwen3.5-27b
+DASHSCOPE_API_KEY=sk-98cca096416a41d5a6cec68b824486c5
+
+# DashScope Qwen3.5-122B-A10B 模型
+[qwen3_5_122b_a10b]
+DASHSCOPE_SERVER_URL=https://dashscope.aliyuncs.com/compatible-mode/v1
+DASHSCOPE_MODEL_ID=qwen3.5-122b-a10b
+DASHSCOPE_API_KEY=sk-98cca096416a41d5a6cec68b824486c5
+
+# ============================================================
+# LLM 通用配置
+# ============================================================
+
+[llm_keywords]
+TIMEOUT=60
+MAX_RETRIES=2
+CONCURRENT_WORKERS=20
+STREAM=false
+TEMPERATURE=0.3
+MAX_TOKENS=1024
+
+
+

+ 4 - 11
core/construction_review/component/doc_worker/config/config.yaml

@@ -76,17 +76,10 @@ header_footer_filter:
   # 页眉后第二行的中文字符数阈值(少于此数量时,连同页眉行和中间空行一起过滤)
   footer_line_chinese_char_threshold: 10
 
-# GLM-OCR 本地 API 配置
-# 【修改日期】2025-03-27: 替换 MinerU 配置为 GLM-OCR
-glm_ocr:
-  # API 地址
-  api_url: "http://183.220.37.46:25429/v1/chat/completions"
-  # 请求超时时间(秒)
-  timeout: 600
-  # 最大 token 数
-  max_tokens: 2048
-  # 温度参数
-  temperature: 0.1
+# 【注意】OCR 配置已迁移到 config.ini [ocr] 段
+# 请修改项目根目录 config.ini 文件中的 [ocr] 配置:
+#   ENGINE=glm_ocr 或 ENGINE=mineru
+# 本文件保留其他非 OCR 相关配置
 
 # 目录识别配置
 toc_detection:

+ 274 - 53
core/construction_review/component/doc_worker/pdf_worker/hybrid_extractor.py

@@ -28,6 +28,27 @@ from ..config.provider import default_config_provider
 from ..interfaces import DocumentSource, FullTextExtractor
 from .fulltext_extractor import PdfFullTextExtractor
 
+
+def _read_ini_config(section: str, key: str, default: Any = None) -> Any:
+    """Read a value from the project-root ``config/config.ini``.
+
+    Args:
+        section: INI section name (configparser matches it case-sensitively).
+        key: Option name within the section; configparser lower-cases option
+            names by default, so lookups like "engine" match "ENGINE" in the file.
+        default: Returned when the file, section, or key is missing, or when
+            any exception occurs while reading (best-effort, never raises).
+
+    Returns:
+        The raw string value from the INI file, or ``default``.
+    """
+    try:
+        import configparser
+        from pathlib import Path
+        
+        # Locate config.ini six directory levels above this module
+        # (assumed project root) — NOTE(review): fragile if the file moves; confirm layout.
+        config_path = Path(__file__).parent.parent.parent.parent.parent.parent / "config" / "config.ini"
+        if not config_path.exists():
+            return default
+        
+        config = configparser.ConfigParser()
+        config.read(config_path, encoding="utf-8")
+        
+        if section in config and key in config[section]:
+            return config[section][key]
+        return default
+    except Exception:
+        # Deliberate best-effort: any parse/IO error falls back to the default.
+        return default
+
 # 尝试导入 PIL 用于图片压缩
 try:
     from PIL import Image
@@ -59,29 +80,58 @@ class HybridFullTextExtractor(FullTextExtractor):
 
     def __init__(
         self,
-        layout_dpi: int = 180,
-        ocr_dpi: int = 220,
-        jpg_quality: int = 85,  # 降低为 85 配合 GLM-OCR
+        layout_dpi: int = 200,  # 【优化】统一 DPI 为 200,兼顾版面分析和 OCR 质量
+        ocr_dpi: int = 200,     # 【优化】与 layout_dpi 保持一致,避免重复渲染
+        jpg_quality: int = 90,
         api_url: Optional[str] = None,
         timeout: int = 600
     ) -> None:
         self._cfg = default_config_provider
         self.local_extractor = PdfFullTextExtractor()
         
-        # GLM-OCR 配置
-        self.api_url = api_url or self._cfg.get(
-            "glm_ocr.api_url", 
+        # 【新增】OCR 引擎选择配置
+        # 优先级:config.ini [ocr] ENGINE > 默认 glm_ocr
+        # 同时支持 "glm_ocr"/"glm-ocr" 和 "mineru"/"mineru-ocr" 等多种写法
+        raw_engine = _read_ini_config("ocr", "engine", "glm_ocr")
+        self.ocr_engine = raw_engine.lower().strip() if raw_engine else "glm_ocr"
+        
+        # 规范化引擎名称(统一转换为标准格式)
+        if self.ocr_engine in ("glm_ocr", "glm-ocr", "glmocr"):
+            self.ocr_engine_normalized = "glm_ocr"
+        elif self.ocr_engine in ("mineru", "mineru-ocr", "mineru_ocr"):
+            self.ocr_engine_normalized = "mineru"
+        else:
+            logger.warning(f"[HybridExtractor] 未知的 OCR 引擎 '{self.ocr_engine}',使用默认 glm_ocr")
+            self.ocr_engine_normalized = "glm_ocr"
+        
+        logger.info(f"[HybridExtractor] OCR 引擎配置: '{self.ocr_engine}' -> 使用: '{self.ocr_engine_normalized}'")
+        
+        # GLM-OCR 配置(从 config.ini 读取,兼容原有逻辑)
+        self.glm_api_url = api_url or _read_ini_config(
+            "ocr", "glm_ocr_api_url", 
             "http://183.220.37.46:25429/v1/chat/completions"
         )
-        self.timeout = timeout
-        self.headers = {"Content-Type": "application/json"}
+        self.glm_timeout = int(_read_ini_config("ocr", "glm_ocr_timeout", "600"))
+        self.glm_headers = {"Content-Type": "application/json"}
+        
+        # 【新增】MinerU 配置
+        self.mineru_api_url = _read_ini_config(
+            "ocr", "mineru_api_url",
+            "http://183.220.37.46:25428/file_parse"
+        )
+        self.mineru_timeout = int(_read_ini_config("ocr", "mineru_timeout", "300"))
         
-        # 飞浆版面分析配置
+        # 【优化】飞浆版面分析配置 - DPI 统一为 200
+        # 原理:版面分析和 OCR 使用相同 DPI,第一阶段渲染的图片可直接复用
         self.layout_dpi = layout_dpi
         self.ocr_dpi = ocr_dpi
         self.jpg_quality = jpg_quality
         self._layout_engine: Optional[Any] = None
         
+        # 【优化】图片缓存:版面分析阶段缓存 table 页图片,供 OCR 阶段复用
+        # 格式: {page_num: (width, height, jpeg_bytes)}
+        self._image_cache: Dict[int, tuple] = {}
+        
         # 外部注入的进度状态字典
         self._progress_state: Optional[dict] = None
         
@@ -98,16 +148,21 @@ class HybridFullTextExtractor(FullTextExtractor):
             self._layout_engine = RapidLayout()
         return self._layout_engine
 
-    def _detect_table_pages(self, doc: fitz.Document, dpi: int = 150) -> Set[int]:
+    def _detect_table_pages(self, doc: fitz.Document, dpi: int = 200) -> Set[int]:
         """
         使用飞浆 RapidLayout 检测所有页面,返回包含 table 区域的页码集合。
-        【保持不变】
+        
+        【优化】检测到 table 的页面,将 JPEG 图片缓存到 self._image_cache
+        供后续 OCR 阶段直接使用,避免重复渲染 PDF。
         """
         table_pages: Set[int] = set()
         layout_engine = self._get_layout_engine()
         total_pages = len(doc)
+        
+        # 清空图片缓存
+        self._image_cache.clear()
 
-        logger.debug(f"  [飞浆分析] 开始版面分析,共 {total_pages} 页...")
+        logger.info(f"  [飞浆分析] 开始版面分析,共 {total_pages} 页,DPI={dpi}(图片缓存已启用)")
 
         for page_num in range(1, total_pages + 1):
             page = doc[page_num - 1]
@@ -133,7 +188,17 @@ class HybridFullTextExtractor(FullTextExtractor):
                 # 判断是否包含 table
                 if "table" in labels:
                     table_pages.add(page_num)
-                    logger.debug(f"    第 {page_num} 页: 检测到 table 区域 -> 将走 GLM-OCR")
+                    
+                    # 【优化】缓存 table 页图片为 JPEG,供 OCR 阶段复用
+                    try:
+                        # 直接保存 Pixmap 的 JPEG 数据,无需 PIL 转换
+                        jpeg_bytes = pix.tobytes("jpeg")
+                        self._image_cache[page_num] = (pix.width, pix.height, jpeg_bytes)
+                        logger.debug(f"    第 {page_num} 页: 检测到 table -> 缓存图片 "
+                                   f"({pix.width}x{pix.height}, {len(jpeg_bytes)/1024:.1f} KB)")
+                    except Exception as cache_err:
+                        logger.warning(f"    第 {page_num} 页: 图片缓存失败 ({cache_err})")
+                        
                 else:
                     region_types = ", ".join(set(labels)) if labels else "无"
                     logger.debug(f"    第 {page_num} 页: {region_types}")
@@ -147,7 +212,9 @@ class HybridFullTextExtractor(FullTextExtractor):
                 self._progress_state['current'] = int(page_num / total_pages * 50)
                 self._progress_state['message'] = f"版面分析中:已分析 {page_num}/{total_pages} 页"
 
-        logger.debug(f"  [飞浆分析] 完成,共 {len(table_pages)} 页包含 table 区域: {sorted(table_pages)}")
+        cache_size_mb = sum(len(data[2]) for data in self._image_cache.values()) / 1024 / 1024
+        logger.info(f"  [飞浆分析] 完成: {len(table_pages)} 页 table,"
+                   f"缓存 {len(self._image_cache)} 页图片 ({cache_size_mb:.1f} MB)")
         return table_pages
 
     def extract_full_text(self, source: DocumentSource) -> List[Dict[str, Any]]:
@@ -156,7 +223,14 @@ class HybridFullTextExtractor(FullTextExtractor):
         1. 首先用飞浆 RapidLayout 检测所有页面的 table 区域
         2. 含有 table 的页面走 GLM-OCR
         3. 其他页面走本地 PyMuPDF 提取
+        
+        【统计信息】本方法会统计并输出总提取时间、OCR页数等信息
         """
+        # 记录总开始时间
+        total_start_time = time.time()
+        layout_analysis_time = 0.0
+        ocr_total_time = 0.0
+        
         # 打开文档
         if source.content is not None:
             doc = fitz.open(stream=io.BytesIO(source.content))
@@ -175,22 +249,28 @@ class HybridFullTextExtractor(FullTextExtractor):
             ocr_page_count = 0  # 统计需要OCR的页数
             
             # INFO级别:开始文档提取(方便查看主要流程)
-            logger.info(f"[文档提取] 开始处理,共 {total_pages} 页,使用混合模式(GLM-OCR)")
-            logger.debug(f"开始混合提取(飞浆版面分析 + GLM-OCR),共 {total_pages} 页...")
+            current_engine = "GLM-OCR" if self.ocr_engine_normalized == "glm_ocr" else "MinerU"
+            logger.info(f"[文档提取] 开始处理,共 {total_pages} 页,OCR引擎: {current_engine}")
+            logger.debug(f"开始混合提取(飞浆版面分析 + {current_engine}),共 {total_pages} 页...")
 
             if self._progress_state is not None:
                 self._progress_state['current'] = 0
                 self._progress_state['message'] = f"版面分析中:已分析 0/{total_pages} 页"
 
             # ========== 第一阶段:飞浆版面分析 ==========
+            layout_start_time = time.time()
             table_pages = self._detect_table_pages(doc, dpi=self.layout_dpi)
+            layout_analysis_time = time.time() - layout_start_time
             ocr_page_count = len(table_pages)
             
             # INFO级别:版面分析完成,显示OCR页数
             if ocr_page_count > 0:
-                logger.info(f"[文档提取] 版面分析完成,共 {ocr_page_count} 页需要OCR识别,{total_pages - ocr_page_count} 页直接提取")
+                logger.info(f"[文档提取] 版面分析完成,共 {ocr_page_count} 页需要OCR识别,"
+                           f"{total_pages - ocr_page_count} 页直接提取,"
+                           f"版面分析耗时: {layout_analysis_time:.2f}s")
             else:
-                logger.info(f"[文档提取] 版面分析完成,无扫描页,全部直接提取")
+                logger.info(f"[文档提取] 版面分析完成,无扫描页,全部直接提取,"
+                           f"版面分析耗时: {layout_analysis_time:.2f}s")
 
             # ========== 第二阶段:分流处理 ==========
             logger.debug(f"\n开始分流处理...")
@@ -199,13 +279,22 @@ class HybridFullTextExtractor(FullTextExtractor):
                 page_num = i + 1
                 
                 if page_num in table_pages:
-                    logger.debug(f"  [第 {page_num} 页] 检测到 table -> 走 GLM-OCR")
+                    # 【修改】根据配置选择 OCR 引擎
+                    # 使用规范化后的引擎名称(支持 glm_ocr/glm-ocr 和 mineru/mineru-ocr)
+                    is_glm_ocr = self.ocr_engine_normalized == "glm_ocr"
+                    ocr_name = "GLM-OCR" if is_glm_ocr else "MinerU"
+                    logger.debug(f"  [第 {page_num} 页] 检测到 table -> 走 {ocr_name}")
 
                     try:
-                        # 调用 GLM-OCR
-                        page_text = self._ocr_page_with_glm(page, page_num, source_file)
+                        # 根据配置调用不同的 OCR 引擎,并统计 OCR 时间
+                        ocr_start_time = time.time()
+                        if is_glm_ocr:
+                            page_text = self._ocr_page_with_glm(page, page_num, source_file)
+                        else:
+                            page_text = self._ocr_page_with_mineru(doc, page_num, source_file)
+                        ocr_total_time += time.time() - ocr_start_time
                     except Exception as e:
-                        logger.error(f"    GLM-OCR 失败,回退到本地提取: {e}")
+                        logger.error(f"    {ocr_name} 失败,回退到本地提取: {e}")
                         raw_text = page.get_text()
                         page_text = self.local_extractor._filter_header_footer(raw_text)
                 else:
@@ -232,10 +321,33 @@ class HybridFullTextExtractor(FullTextExtractor):
 
         finally:
             doc.close()
+            # 【优化】清理图片缓存,释放内存
+            if hasattr(self, '_image_cache'):
+                cache_size = len(self._image_cache)
+                self._image_cache.clear()
+                if cache_size > 0:
+                    logger.debug(f"  [缓存清理] 已清理 {cache_size} 页图片缓存")
         
-        # INFO级别:文档提取完成
+        # ========== 统计信息输出 ==========
+        # INFO级别:文档提取完成,输出详细统计
+        total_time = time.time() - total_start_time
         total_chars = sum(len(page['text']) for page in pages)
-        logger.info(f"[文档提取] 完成,共 {total_pages} 页,总字符数: {total_chars}")
+        
+        # 计算各类时间占比
+        ocr_avg_time = ocr_total_time / ocr_page_count if ocr_page_count > 0 else 0
+        local_pages = total_pages - ocr_page_count
+        
+        logger.info(
+            f"[文档提取] 完成统计 | "
+            f"总页数: {total_pages} | "
+            f"OCR页数: {ocr_page_count} | "
+            f"本地提取: {local_pages} | "
+            f"总耗时: {total_time:.2f}s | "
+            f"版面分析: {layout_analysis_time:.2f}s | "
+            f"OCR耗时: {ocr_total_time:.2f}s | "
+            f"OCR平均: {ocr_avg_time:.2f}s/页 | "
+            f"总字符数: {total_chars}"
+        )
 
         return pages
 
@@ -243,42 +355,41 @@ class HybridFullTextExtractor(FullTextExtractor):
         """
         将单页转为图片并调用 GLM-OCR 本地 API 识别
         
-        【逻辑来源】glm_ocr_api_extractor.py 最终实现版本
+        【优化】优先使用版面分析阶段缓存的图片,避免重复渲染
         
         流程:
-        1. PyMuPDF 渲染页面为图片(220 DPI
-        2. PIL 压缩图片(短边限制 1024px,JPEG 质量 85
-        3. Base64 编码
-        4. 构建 OpenAI 兼容格式请求
+        1. 优先使用缓存图片(如可用
+        2. 否则 PyMuPDF 渲染页面为图片(200 DPI
+        3. PIL 压缩图片(短边限制 1024px,JPEG 质量 90)
+        4. Base64 编码
         5. POST 请求 GLM-OCR API
         6. 解析响应并转换 HTML→Markdown
-        
-        请求格式:
-        {
-            "model": "GLM-OCR",
-            "messages": [{
-                "role": "user",
-                "content": [
-                    {"type": "text", "text": "提示词"},
-                    {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,..."}}
-                ]
-            }],
-            "max_tokens": 2048,
-            "temperature": 0.1
-        }
         """
         start_time = time.time()
         
+        # 【优化】检查是否有缓存图片
+        cached = self._image_cache.get(page_num)
+        use_cache = cached is not None
+        
         # INFO级别:开始调用GLM-OCR识别(方便查看主要流程)
-        logger.info(f"[GLM-OCR] 开始识别第 {page_num} 页(扫描页)")
+        cache_info = "(使用缓存图片)" if use_cache else ""
+        logger.info(f"[GLM-OCR] 开始识别第 {page_num} 页 {cache_info}")
         
         try:
-            # 1. 渲染为图片
-            pix = page.get_pixmap(dpi=self.ocr_dpi)
-            img_bytes = pix.tobytes("jpeg")
-            original_kb = len(img_bytes) / 1024
-            
-            logger.debug(f"    [GLM-OCR] 第 {page_num} 页图片: {original_kb:.1f} KB ({pix.width}x{pix.height})")
+            # 1. 获取图片(优先使用缓存)
+            if use_cache:
+                # 【优化】使用版面分析阶段缓存的图片
+                width, height, img_bytes = cached
+                original_kb = len(img_bytes) / 1024
+                logger.debug(f"    [GLM-OCR] 第 {page_num} 页使用缓存图片: "
+                           f"{original_kb:.1f} KB ({width}x{height})")
+            else:
+                # 兜底:重新渲染(理论上不会发生,因为 table 页都应已缓存)
+                pix = page.get_pixmap(dpi=self.ocr_dpi)
+                img_bytes = pix.tobytes("jpeg")
+                original_kb = len(img_bytes) / 1024
+                logger.warning(f"    [GLM-OCR] 第 {page_num} 页无缓存,重新渲染: "
+                             f"{original_kb:.1f} KB ({pix.width}x{pix.height})")
             
             # 2. 压缩图片
             compressed_bytes = self._compress_image(img_bytes)
@@ -313,10 +424,10 @@ class HybridFullTextExtractor(FullTextExtractor):
             
             # 5. 调用 GLM-OCR API
             response = requests.post(
-                self.api_url,
-                headers=self.headers,
+                self.glm_api_url,
+                headers=self.glm_headers,
                 json=payload,
-                timeout=self.timeout
+                timeout=self.glm_timeout
             )
             response.raise_for_status()
             
@@ -338,6 +449,116 @@ class HybridFullTextExtractor(FullTextExtractor):
             logger.error(f"    [GLM-OCR] 第 {page_num} 页识别失败: {e}")
             raise
 
+    def _ocr_page_with_mineru(self, doc: fitz.Document, page_num: int, original_filename: str) -> str:
+        """
+        [New] Recognize a single page with the local MinerU API.
+        
+        Flow:
+        1. [Optimization] Prefer the JPEG image cached during layout analysis.
+        2. On a cache miss, extract the single page into a temporary PDF file.
+        3. Upload it to the MinerU API for recognition.
+        4. Extract the Markdown content from the JSON response.
+        5. Remove the temporary file (if one was created).
+        
+        Args:
+            doc: The original PDF document object.
+            page_num: Page number (1-based).
+            original_filename: Original file name (used for logging only).
+            
+        Returns:
+            str: The recognized Markdown text (may be empty if the response
+                 contains no ``md_content``).
+        
+        Raises:
+            RuntimeError: When the API responds with a non-200 status.
+            Exception: Any network/parse error is logged and re-raised so the
+                caller can fall back to local extraction.
+        """
+        import tempfile
+        import os
+        
+        start_time = time.time()
+        
+        # [Optimization] Check whether a cached image exists for this page.
+        cached = self._image_cache.get(page_num)
+        use_cache = cached is not None
+        
+        # INFO level: announce recognition start (cache_info is a runtime log suffix).
+        cache_info = "(使用缓存图片)" if use_cache else ""
+        logger.info(f"[MinerU] 开始识别第 {page_num} 页 {cache_info}")
+        
+        tmp_pdf_path = None
+        
+        try:
+            # [Optimization] Prefer the cached JPEG bytes when available.
+            if use_cache:
+                width, height, img_bytes = cached
+                logger.debug(f"    [MinerU] 第 {page_num} 页使用缓存图片: "
+                           f"{len(img_bytes)/1024:.1f} KB ({width}x{height})")
+                
+                # Upload the image directly — NOTE(review): assumes the MinerU
+                # endpoint accepts image uploads, not only PDFs; confirm.
+                files = {'files': (f"page_{page_num}.jpg", io.BytesIO(img_bytes))}
+                response = requests.post(
+                    self.mineru_api_url,
+                    files=files,
+                    timeout=self.mineru_timeout
+                )
+            else:
+                # Fallback: extract this single page into a temporary PDF.
+                logger.debug(f"    [MinerU] 第 {page_num} 页无缓存,创建临时 PDF")
+                
+                single_page_doc = fitz.open()
+                single_page_doc.insert_pdf(doc, from_page=page_num-1, to_page=page_num-1)
+                
+                # delete=False so the path survives the context manager and can
+                # be written/reopened below; removed in the finally block.
+                with tempfile.NamedTemporaryFile(suffix=".pdf", delete=False) as tmp_file:
+                    tmp_pdf_path = tmp_file.name
+                
+                single_page_doc.save(tmp_pdf_path)
+                single_page_doc.close()
+                
+                file_size_kb = os.path.getsize(tmp_pdf_path) / 1024
+                logger.debug(f"    [MinerU] 第 {page_num} 页临时文件: {file_size_kb:.1f} KB")
+                
+                # Call the MinerU API with the single-page PDF.
+                with open(tmp_pdf_path, 'rb') as f:
+                    files = {'files': (f"page_{page_num}.pdf", f)}
+                    response = requests.post(
+                        self.mineru_api_url,
+                        files=files,
+                        timeout=self.mineru_timeout
+                    )
+            
+            if response.status_code != 200:
+                raise RuntimeError(f"MinerU API error: {response.status_code} - {response.text[:200]}")
+            
+            # 3. Parse the result payload. NOTE(review): takes the first entry
+            # carrying "md_content"; presumably only one file per upload — verify
+            # against the MinerU file_parse response schema.
+            result = response.json()
+            content = ""
+            
+            if "results" in result and isinstance(result["results"], dict):
+                for filename, file_data in result["results"].items():
+                    if isinstance(file_data, dict) and "md_content" in file_data:
+                        content = file_data["md_content"]
+                        break
+            
+            # 4. Convert HTML to Markdown when HTML table/div tags are present.
+            if "<table" in content.lower() or "<div" in content.lower():
+                logger.debug(f"    [MinerU] 检测到 HTML 标签,转换为 Markdown")
+                content = self._process_raw_content(content)
+            
+            elapsed = time.time() - start_time
+            logger.info(f"[MinerU] 第 {page_num} 页识别完成,耗时: {elapsed:.2f}s,字符数: {len(content)}")
+            
+            return content
+            
+        except Exception as e:
+            logger.error(f"    [MinerU] 第 {page_num} 页识别失败: {e}")
+            raise
+            
+        finally:
+            # Best-effort cleanup of the temporary PDF; failures are ignored.
+            if tmp_pdf_path and os.path.exists(tmp_pdf_path):
+                try:
+                    os.remove(tmp_pdf_path)
+                    logger.debug(f"    [MinerU] 清理临时文件: {tmp_pdf_path}")
+                except:
+                    pass
+
     def _compress_image(self, img_bytes: bytes) -> bytes:
         """
         压缩图片至 GLM-OCR 要求的尺寸限制内