retrieval.py

import asyncio
import json
import re
import time
from typing import List, Dict, Any, Optional

from foundation.ai.models.rerank_model import rerank_model
from foundation.observability.monitoring.time_statistics import track_execution_time
from foundation.infrastructure.config.config import config_handler
from foundation.observability.logger.loggering import server_logger
from foundation.database.base.vector.milvus_vector import MilvusVectorManager


class RetrievalManager:
    """
    Retrieval manager implementing multi-stage recall.
    """

    def __init__(self):
        """
        Initialize the retrieval manager.
        """
        self.vector_manager = MilvusVectorManager()
        self.logger = server_logger
        self.dense_weight = config_handler.get('hybrid_search', 'DENSE_WEIGHT', 0.7)
        self.sparse_weight = config_handler.get('hybrid_search', 'SPARSE_WEIGHT', 0.3)
        # Rerank model configuration (managed centrally under the [model] section)
        self.rerank_model_type = config_handler.get('model', 'RERANK_MODEL_TYPE', 'bge_rerank_model')
        self.logger.info(f"Initialized rerank model type: {self.rerank_model_type}")

    def set_rerank_model(self, model_type: str):
        """
        Set the rerank model type.

        Args:
            model_type: Config section name ('bge_rerank_model', 'lq_rerank_model', 'silicoflow_rerank_model')
        """
        valid_models = ['bge_rerank_model', 'lq_rerank_model', 'silicoflow_rerank_model']
        if model_type not in valid_models:
            raise ValueError(f"model_type must be one of {valid_models}")
        self.rerank_model_type = model_type
        self.logger.info(f"Rerank model type set to: {model_type}")
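
    # Usage sketch (illustrative): switch all subsequent reranks to the local
    # Qwen3 reranker at runtime:
    #   retrieval_manager.set_rerank_model('lq_rerank_model')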

    def _clean_document(self, doc: str) -> str:
        """
        Clean document text: strip HTML tags and special characters.

        Args:
            doc: Raw document text
        Returns:
            str: Cleaned document text
        """
        if not isinstance(doc, str):
            self.logger.debug(f"Coercing document type: {type(doc)} -> str")
            return str(doc)
        original_length = len(doc)
        # Strip HTML tags
        doc = re.sub(r'<[^>]+>', '', doc)
        # Collapse runs of whitespace
        doc = re.sub(r'\s+', ' ', doc)
        # Lenient character filter -- keep CJK, word characters, and common punctuation
        doc = re.sub(r'[^\u4e00-\u9fff\w\s.,;:!?()()。,;:!?\-\+\=\*/%&@#¥$【】「」""''""\n\r]', '', doc)
        # Truncate overly long text
        if len(doc) > 8000:  # maximum length cap
            doc = doc[:8000] + "..."
        cleaned_doc = doc.strip()
        self.logger.debug(f"Cleaned document: {original_length} -> {len(cleaned_doc)} chars")
        return cleaned_doc
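
    # Illustrative only (hypothetical input, not from the source): expected
    # behavior of _clean_document on a small HTML fragment:
    #   _clean_document("<p>Hello   <b>world</b>!</p>")
    #   -> "Hello world!"   (tags stripped, whitespace collapsed)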

    def _get_rerank_results(self, query_text: str, documents: List[str], top_k: Optional[int] = None) -> List[Dict[str, Any]]:
        """
        Route to the configured rerank model and run reranking.

        Args:
            query_text: Query text
            documents: List of documents
            top_k: Number of results to return
        Returns:
            List[Dict]: Reranked results
        """
        try:
            # Clean and validate the document list
            cleaned_documents = []
            valid_original_docs = []
            for doc in documents:
                if doc and isinstance(doc, str) and doc.strip():
                    cleaned_doc = self._clean_document(doc)
                    if cleaned_doc and len(cleaned_doc) > 3:
                        cleaned_documents.append(cleaned_doc)
                        valid_original_docs.append(doc)
            if not cleaned_documents:
                return []
            # Route to the reranker matching the configured section name
            if self.rerank_model_type == 'lq_rerank_model':
                self.logger.info("Reranking with local Qwen3-Reranker-8B (lq_rerank_model)")
                rerank_results = rerank_model.lq_rerank(query_text, cleaned_documents, top_k)
            elif self.rerank_model_type == 'silicoflow_rerank_model':
                self.logger.info("Reranking with SiliconFlow Qwen3-Reranker-8B (silicoflow_rerank_model)")
                rerank_results = rerank_model.qwen3_rerank(query_text, cleaned_documents, top_k)
            else:  # bge_rerank_model (default)
                self.logger.info("Reranking with BGE Reranker (bge_rerank_model)")
                rerank_results = rerank_model.bge_rerank(query_text, cleaned_documents, top_k)
            # Map cleaned text back to the original text (needed for every reranker)
            for result in rerank_results:
                cleaned_text = result.get('text', '')
                # Look up the original text
                for i, cleaned in enumerate(cleaned_documents):
                    if cleaned == cleaned_text:
                        result['text'] = valid_original_docs[i]
                        break
                # Normalize the field name: expose relevance_score as score
                if 'relevance_score' in result and 'score' not in result:
                    result['score'] = float(result['relevance_score'])
            return rerank_results
        except Exception as e:
            self.logger.error(f"Rerank failed, model type: {self.rerank_model_type}, error: {str(e)}")
            # Fall back to the original document order
            return [{"text": doc, "score": 0.0} for doc in documents[:top_k]]
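
    # Shape note: downstream callers rely on each result dict carrying 'text'
    # (mapped back to the original document) and a float 'score', e.g.
    #   [{"text": "<original document>", "score": 0.93}, ...]
    # Any extra fields the underlying reranker returns are passed through.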

    @track_execution_time
    async def entity_recall(self, main_entity: str, assisted_search_entity: list,
                            recall_top_k: int = 5, max_results: Optional[int] = None) -> List[str]:
        """
        Run entity recall.

        Args:
            main_entity: Main query entity
            assisted_search_entity: List of assisted search entities
            recall_top_k: Number of results per single-entity recall (default 5)
            max_results: Maximum number of final results; None returns everything (default None)
        Returns:
            List[str]: List of entity text contents
        Note:
            Actual count = min(max_results, main-entity hits + all assisted-entity hits).
            Without max_results the result can be large, depending on the number of assisted entities.
        """
        self.logger.info(f"[entity_recall] Starting recall, recall_top_k={recall_top_k}, max_results={max_results}, main entity='{main_entity}', assisted entity count={len(assisted_search_entity)}")
        collection_name = "first_bfp_collection_entity"
        # Main-entity search, using the async path
        entity_result = await self.async_multi_stage_recall(
            collection_name=collection_name,
            query_text=main_entity,
            hybrid_top_k=50,
            top_k=recall_top_k
        )
        self.logger.info(f"[entity_recall] Main-entity recall done, {len(entity_result)} results")
        assist_tasks = [
            self.async_multi_stage_recall(
                collection_name=collection_name,
                query_text=entity,
                hybrid_top_k=50,
                top_k=recall_top_k
            ) for entity in assisted_search_entity
        ]
        # Assisted searches run concurrently
        assist_results_list = await asyncio.gather(*assist_tasks, return_exceptions=True)
        assist_results = []
        for res in assist_results_list:
            if isinstance(res, Exception):
                self.logger.error(f"Assisted entity recall failed: {str(res)}")
            else:
                assist_results.extend(res)
        all_results = entity_result + assist_results
        # Optional debug dump of the raw recall results, e.g.:
        #   with open(f"temp/entity_bfp_recall/{self.rerank_model_type}.json", "w", encoding="utf-8") as f:
        #       json.dump(all_results, f, ensure_ascii=False, indent=4)
        # Deduplicate (order-preserving) and extract the text content
        entity_list = list(dict.fromkeys(item['text_content'] for item in all_results))
        # Truncate if max_results is set
        if max_results is not None and len(entity_list) > max_results:
            entity_list = entity_list[:max_results]
            self.logger.info(f"[entity_recall] Results truncated to max_results={max_results}")
        self.logger.info(f"entity_list_len: {len(entity_list)}")
        return entity_list
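
    # Usage sketch (hypothetical entities; the collection name is fixed inside
    # the method):
    #   texts = asyncio.run(retrieval_manager.entity_recall(
    #       main_entity="main entity", assisted_search_entity=["aux 1", "aux 2"],
    #       recall_top_k=5, max_results=20))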

    @track_execution_time
    async def async_bfp_recall(self, entity_list: List[str], background: str,
                               top_k: int = 3) -> List[Dict[str, Any]]:
        """
        Hybrid-search recall: vector + BM25.

        Args:
            entity_list: List of entities
            background: Background/context used for the second-pass rerank
            top_k: Number of results to return
        """
        start_time = time.time()
        background_preview = background[:50] if background else ''
        self.logger.info(f"[async_bfp_recall] Starting recall, top_k={top_k}, entity count={len(entity_list)}, background='{background_preview}...'")
        # Concurrently recall compilation-basis documents
        collection_name = "rag_children_hybrid"
        gather_start = time.time()
        # Optimization: a lower hybrid_top_k (10 instead of 50) cuts hybrid-search time
        bfp_tasks = [
            self.async_multi_stage_recall(
                collection_name=collection_name,
                query_text=entity,
                hybrid_top_k=10,
                top_k=top_k
            ) for entity in entity_list
        ]
        bfp_tasks_list = await asyncio.gather(*bfp_tasks, return_exceptions=True)
        gather_end = time.time()
        bfp_results = []
        for res in bfp_tasks_list:
            if isinstance(res, Exception):
                self.logger.error(f"Entity recall failed: {str(res)}")
            else:
                bfp_results.extend(res)
        self.logger.info(f"[async_bfp_recall] First-stage recall done, {len(bfp_results)} documents")
        # BFP results are already reranked inside multi_stage_recall; keep that order.
        # Only documents with a first-pass rerank score above 0.8 get a second rerank.
        high_score_results = [item for item in bfp_results if (item.get('rerank_score') or 0) > 0.8]
        low_score_results = [item for item in bfp_results if (item.get('rerank_score') or 0) <= 0.8]
        self.logger.info(f"Filtered: {len(high_score_results)} high-score (>0.8) and {len(low_score_results)} low-score (<=0.8) documents")
        # Without any high-score documents, return top_k results sorted by hybrid_similarity
        if not high_score_results:
            self.logger.info(f"No document scored above 0.8; skipping second rerank and returning top_k={top_k} results sorted by hybrid_similarity")
            sorted_results = sorted(bfp_results, key=lambda x: x.get('hybrid_similarity') or 0, reverse=True)
            return sorted_results[:top_k]
        # Skip the second rerank when background is empty
        if not background or not background.strip():
            self.logger.warning("background is empty; skipping second rerank and returning the high-score documents as-is")
            return high_score_results
        # Extract high-score text content for the second rerank (order-preserving dedup)
        seen_texts = set()
        high_score_text_content = []
        for item in high_score_results:
            text = item['text_content']
            if text not in seen_texts:
                seen_texts.add(text)
                high_score_text_content.append(text)
        self.logger.info(f"Extracted {len(high_score_text_content)} high-score texts for the second rerank")
        # Second rerank, using the configured rerank model
        rerank_start = time.time()
        # Honor the caller's top_k instead of hardcoding 5
        bfp_rerank_result = self._get_rerank_results(background, high_score_text_content, top_k)
        rerank_end = time.time()
        self.logger.info(f"Second rerank took {rerank_end - rerank_start:.3f}s, top_k={top_k}")
        # Reassemble the results according to the rerank order
        reorganize_start = time.time()
        final_results = []
        # Map text_content -> all matching original documents (keep every match)
        text_to_items = {}
        for item in high_score_results:
            text = item['text_content']
            if text not in text_to_items:
                text_to_items[text] = []
            text_to_items[text].append(item)
        # Walk the second-pass rerank results
        added_texts = set()  # texts already emitted, to avoid duplicates
        for rerank_item in bfp_rerank_result:
            text = rerank_item.get('text', '')
            parent_id = rerank_item.get('parent_id', '')
            score = rerank_item.get('score', 0.0)
            if text in text_to_items and text not in added_texts:
                # Of all candidates sharing this text, keep the one with the highest rerank_score
                candidates = text_to_items[text]
                best_candidate = max(candidates, key=lambda x: x.get('rerank_score', 0.0))
                result_item = best_candidate.copy()
                result_item['bfp_rerank_score'] = score
                result_item['bfp_rerank_parent_id'] = parent_id
                final_results.append(result_item)
                added_texts.add(text)  # mark this text as emitted
        reorganize_end = time.time()
        total_time = reorganize_end - start_time
        self.logger.info(f"Result reassembly took {reorganize_end - reorganize_start:.3f}s")
        self.logger.info(f"Second rerank done: returning {len(final_results)} high-score documents (top_k={top_k}), dropping {len(low_score_results)} low-score documents")
        self.logger.info(f"[async_bfp_recall] Total: {total_time:.3f}s (recall: {gather_end - gather_start:.3f}s + rerank: {rerank_end - rerank_start:.3f}s + other: {total_time - (gather_end - gather_start) - (rerank_end - rerank_start):.3f}s)")
        return final_results
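
    # Pipeline sketch for async_bfp_recall: per-entity hybrid recall + first
    # rerank (inside async_multi_stage_recall) -> keep rerank_score > 0.8 ->
    # second rerank against `background` -> top_k documents.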

    def hybrid_search_recall(self, collection_name: str, query_text: str,
                             top_k: int = 10, ranker_type: str = "weighted",
                             dense_weight: float = 0.7, sparse_weight: float = 0.3) -> List[Dict[str, Any]]:
        """
        Hybrid-search recall: vector + BM25.

        Args:
            collection_name: Collection name
            query_text: Query text
            top_k: Number of results to return
            ranker_type: Ranker type, "weighted" or "rrf"
            dense_weight: Dense-vector weight
            sparse_weight: Sparse-vector weight
        Returns:
            List[Dict]: Search results
        """
        try:
            self.logger.info("Starting hybrid search")
            param = {'collection_name': collection_name}
            # Call the synchronous hybrid search directly (this method is synchronous)
            results = self.vector_manager.hybrid_search(
                param=param,
                query_text=query_text,
                top_k=top_k,
                ranker_type=ranker_type,
                dense_weight=dense_weight,
                sparse_weight=sparse_weight
            )
            self.logger.info(f"Hybrid-search recall returned {len(results)} results")
            return results
        except Exception as e:
            self.logger.error(f"Hybrid-search recall failed: {str(e)}")
            return []
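
    # With ranker_type="weighted", the combined score is conceptually
    #   score = dense_weight * dense_score + sparse_weight * sparse_score
    # while "rrf" fuses by reciprocal rank instead. The exact normalization is
    # up to MilvusVectorManager.hybrid_search; this is the standard shape of a
    # weighted ranker, not a guarantee about that implementation.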

    def rerank_recall(self, candidates_with_metadata: List[Dict[str, Any]], query_text: str,
                      top_k: Optional[int] = None) -> List[Dict[str, Any]]:
        """
        Rerank recall: reorder candidate documents with the configured rerank model.

        Args:
            candidates_with_metadata: Candidate documents with text content and metadata
            query_text: Query text
            top_k: Number of results to return
        Returns:
            List[Dict]: Reranked results, including original index information
        """

        def extract_title_file(metadata: Dict[str, Any]) -> tuple:
            """Pull (title, file) out of metadata, handling the nested JSON-string form."""
            if 'metadata' in metadata and isinstance(metadata['metadata'], str):
                try:
                    inner_metadata = json.loads(metadata['metadata'])
                    return inner_metadata.get('title', ''), inner_metadata.get('file', '')
                except (json.JSONDecodeError, TypeError):
                    return '', ''
            return metadata.get('title', ''), metadata.get('file', '')

        try:
            # Step 1: deduplicate on the combination of text content + key metadata
            unique_candidates = []
            original_indices_map = []  # original indices backing each unique candidate
            combination_to_index = {}  # (text, title, file) -> index into unique_candidates
            for original_index, candidate in enumerate(candidates_with_metadata):
                text_content = candidate.get('text_content', '')
                title, file = extract_title_file(candidate.get('metadata', {}))
                combination_key = (text_content, title, file)
                if combination_key not in combination_to_index:
                    # New unique combination
                    combination_to_index[combination_key] = len(unique_candidates)
                    unique_candidates.append(candidate)
                    original_indices_map.append([original_index])
                else:
                    # Known combination: record the extra original index
                    original_indices_map[combination_to_index[combination_key]].append(original_index)
            # Extract the unique texts for reranking
            unique_texts = [candidate.get('text_content', '') for candidate in unique_candidates]
            # Rerank with the configured model
            rerank_results = self._get_rerank_results(query_text, unique_texts, top_k)
            # Convert the result format, resolving original indices via the map
            scored_docs = []
            for i, api_result in enumerate(rerank_results):
                rerank_text = api_result.get('text', '')
                rerank_score = float(api_result.get('score', 0.0))
                # Find the matching unique candidate by text
                # (reranking changes the order, so index i cannot be used directly)
                found_index = None
                original_candidate = None
                for idx, candidate in enumerate(unique_candidates):
                    if candidate.get('text_content', '') == rerank_text:
                        found_index = idx
                        original_candidate = candidate
                        break
                if original_candidate is None:
                    self.logger.warning(f"[rerank_recall] No matching candidate found, skipping: {rerank_text[:50]}...")
                    continue
                # Resolve the original index through the map
                original_index = original_indices_map[found_index][0]
                # Carry over the original hybrid-search scores
                hybrid_distance = original_candidate.get('distance', 0.0)
                hybrid_similarity = original_candidate.get('similarity', 0.0)
                scored_docs.append({
                    'text_content': rerank_text,
                    'metadata': original_candidate.get('metadata', {}),  # keep the original metadata
                    'rerank_score': rerank_score,
                    'original_index': original_index,
                    'rerank_rank': i,
                    'duplicate_count': len(original_indices_map[found_index]),  # how many duplicates collapsed here
                    'hybrid_distance': hybrid_distance,  # original hybrid-search scores
                    'hybrid_similarity': hybrid_similarity
                })
            return scored_docs
        except Exception as e:
            self.logger.error(f"Rerank recall failed: {str(e)}")
            return []
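
    # Dedup-key illustration (hypothetical metadata): candidates with the same
    # text but different source files stay separate, because the key is the
    # triple (text_content, title, file):
    #   ("Section 3.2 ...", "Spec A", "a.pdf") != ("Section 3.2 ...", "Spec A", "b.pdf")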

    def _format_final_results(self, rerank_results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Normalize the metadata structure of reranked results.

        Returns:
            List[Dict]: Results carrying both the hybrid and the rerank score
        """
        final_results = []
        for rerank_result in rerank_results:
            metadata = rerank_result.get('metadata', {}).copy()
            duplicate_count = rerank_result.get('duplicate_count', 1)
            # Lift a nested metadata JSON string up into the outer dict
            if 'metadata' in metadata and isinstance(metadata['metadata'], str):
                try:
                    inner_metadata = json.loads(metadata['metadata'])
                    metadata.update(inner_metadata)
                    # Drop the inner metadata string to avoid duplication
                    del metadata['metadata']
                except (json.JSONDecodeError, TypeError):
                    # Keep the original form if parsing fails
                    pass
            # Drop the duplicated content field
            if 'content' in metadata:
                del metadata['content']
            # Record the duplicate count in the metadata
            if duplicate_count > 1:
                metadata['duplicate_count'] = duplicate_count
            # Emit the normalized result with both scores
            final_results.append({
                'text_content': rerank_result['text_content'],
                'metadata': metadata,
                'hybrid_similarity': rerank_result.get('hybrid_similarity', 0.0),  # hybrid-search similarity
                'rerank_score': rerank_result.get('rerank_score', 0.0)  # rerank model score
            })
            self.logger.debug(f"Metadata normalized: rerank rank {rerank_result.get('rerank_rank')}, duplicate count={duplicate_count}")
        return final_results

    def multi_stage_recall(self, collection_name: str, query_text: str,
                           hybrid_top_k: int = 50, top_k: int = 10,
                           ranker_type: str = "weighted") -> List[Dict[str, Any]]:
        """
        Multi-stage recall: hybrid search first, then rerank; only reranked results are returned.

        Args:
            collection_name: Collection name
            query_text: Query text
            hybrid_top_k: Number of documents recalled by hybrid search
            top_k: Number of documents finally returned
            ranker_type: Ranker type used by the hybrid search
        Returns:
            List[Dict]: Reranked results
        """
        try:
            self.logger.info("Running multi-stage recall")
            # Stage 1: hybrid-search recall (vector + BM25)
            hybrid_results = self.hybrid_search_recall(
                collection_name=collection_name,
                query_text=query_text,
                top_k=hybrid_top_k,
                ranker_type=ranker_type
            )
            if not hybrid_results:
                self.logger.warning("Hybrid search recalled nothing; returning an empty list")
                return []
            # Stage 2: rerank recall, passing the full hybrid results (with metadata)
            rerank_results = self.rerank_recall(
                candidates_with_metadata=hybrid_results,
                query_text=query_text,
                top_k=top_k
            )
            # Normalize the metadata structure of the reranked results
            return self._format_final_results(rerank_results)
        except Exception as e:
            self.logger.error(f"Multi-stage recall failed: {str(e)}")
            return []

    async def async_multi_stage_recall(self, collection_name: str, query_text: str,
                                       hybrid_top_k: int = 50, top_k: int = 10,
                                       ranker_type: str = "weighted") -> List[Dict[str, Any]]:
        """
        Async multi-stage recall: hybrid search first, then rerank; only reranked results are returned.

        Args and Returns: same as multi_stage_recall.
        """
        try:
            # Stage 1: hybrid-search recall (vector + BM25), run off the event loop
            hybrid_results = await asyncio.to_thread(
                self.hybrid_search_recall,
                collection_name=collection_name,
                query_text=query_text,
                top_k=hybrid_top_k,
                ranker_type=ranker_type
            )
            if not hybrid_results:
                return []
            # Stage 2: rerank recall, also pushed to a thread so the blocking
            # rerank call does not stall concurrent recall tasks
            rerank_results = await asyncio.to_thread(
                self.rerank_recall,
                candidates_with_metadata=hybrid_results,
                query_text=query_text,
                top_k=top_k
            )
            # Normalize the metadata structure of the reranked results
            return self._format_final_results(rerank_results)
        except Exception as e:
            self.logger.error(f"Async multi-stage recall failed: {str(e)}")
            return []


# Global retrieval manager instance
retrieval_manager = RetrievalManager()
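

# A minimal smoke-test sketch, assuming a reachable Milvus instance and that
# the collections referenced above exist; the query text below is hypothetical
# and only illustrates the call pattern.
if __name__ == "__main__":
    async def _demo():
        results = await retrieval_manager.async_multi_stage_recall(
            collection_name="rag_children_hybrid",  # collection used by async_bfp_recall
            query_text="example query",             # hypothetical query
            hybrid_top_k=20,
            top_k=5
        )
        for r in results:
            print(r['rerank_score'], r['text_content'][:80])

    asyncio.run(_demo())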