chenkun 3 settimane fa
parent
commit
bda3f65cdb

File diff suppressed because it is too large
+ 1239 - 2
logs/lq-admin-app.log.1


File diff suppressed because it is too large
+ 0 - 318
logs/lq-admin-app.log.5


+ 1 - 1
src/app/api/v1/document/knowledge_base.py

@@ -23,7 +23,7 @@ router = APIRouter()
 @router.get("", response_model=PaginatedResponseSchema)
 async def get_knowledge_bases(
     page: int = Query(1, ge=1, description="页码"),
-    page_size: int = Query(10, ge=1, le=100, description="每页数量"),
+    page_size: int = Query(10, ge=1, le=1000, description="每页数量"),
     keyword: str = Query(None, description="搜索关键词"),
     status: str = Query(None, description="状态筛选"),
     db: AsyncSession = Depends(get_db)

+ 1 - 1
src/app/sample/models/base_info.py

@@ -96,7 +96,7 @@ class DocumentMain(BaseModel):
     __tablename__ = "t_samp_document_main"
 
     id = Column(String(36), primary_key=True, comment="主键")
-    source_type = Column(Enum('standard', 'construction_plan', 'regulation'), nullable=False, comment="所属类型")
+    source_type = Column(Enum('standard', 'construction_plan', 'regulation', 'other'), nullable=False, comment="所属类型")
     title = Column(String(255), nullable=False, comment="文档名称")
     conversion_status = Column(Integer, nullable=False, default=0, comment="状态: 0-待转换, 1-转换中, 2-完成, 3-失败")
     whether_to_enter = Column(Integer, nullable=False, default=0, comment="是否入库: 0-未入库, 1-已入库")

+ 1 - 0
src/app/sample/schemas/sample_schemas.py

@@ -64,6 +64,7 @@ class DocumentAdd(BaseModel):
     file_url: Optional[str] = None
     json_url: Optional[str] = None
     file_extension: Optional[str] = None
+    kb_id: Optional[str] = None
 
 class DocumentListRequest(BaseModel):
     page: int = 1

+ 1 - 1
src/app/schemas/base.py

@@ -27,7 +27,7 @@ class ResponseSchema(BaseSchema):
 class PaginationSchema(BaseSchema):
     """分页Schema"""
     page: int = Field(default=1, ge=1, description="页码")
-    page_size: int = Field(default=20, ge=1, le=100, description="每页数量")
+    page_size: int = Field(default=20, ge=1, le=1000, description="每页数量")
     total: int = Field(default=0, description="总数量")
     total_pages: int = Field(default=0, description="总页数")
 

+ 2 - 0
src/app/server/app.py

@@ -146,6 +146,7 @@ app.add_middleware(
 )
 
 # --- 调试中间件 ---
+"""
 @app.middleware("http")
 async def log_requests(request: Request, call_next):
     # logger.info(f"收到请求: {request.method} {request.url}")
@@ -156,6 +157,7 @@ async def log_requests(request: Request, call_next):
     except Exception as e:
         logger.error(f"请求处理异常: {e}")
         raise
+"""
 # ------------------
 
 

+ 15 - 42
src/app/services/knowledge_base_service.py

@@ -219,39 +219,6 @@ class KnowledgeBaseService:
     ) -> Tuple[List[KnowledgeBase], PaginationSchema]:
         """获取知识库列表"""
         
-        # --- 同步 Milvus 数据 (简化版:仅更新现有KB的计数和状态) ---
-        try:
-            # 1. 获取 Milvus 所有集合
-            milvus_names = milvus_service.client.list_collections()
-            
-            # 2. 获取 DB 中已有的集合
-            result = await db.execute(select(KnowledgeBase).where(KnowledgeBase.is_deleted == 0))
-            existing_kbs = result.scalars().all()
-            
-            # 3. 更新现有KB的统计
-            has_changes = False
-            for kb in existing_kbs:
-                total_count = 0
-                
-                # 统计 collection_name_parent
-                if kb.collection_name_parent and kb.collection_name_parent in milvus_names:
-                    total_count += await self._get_collection_row_count(kb.collection_name_parent)
-                    
-                # 统计 collection_name_children
-                if kb.collection_name_children and kb.collection_name_children in milvus_names:
-                    total_count += await self._get_collection_row_count(kb.collection_name_children)
-                    
-                if kb.document_count != total_count:
-                    kb.document_count = total_count
-                    has_changes = True
-
-            if has_changes:
-                await db.commit()
-
-        except Exception as e:
-            print(f"Sync Milvus collections failed: {e}")
-        # ----------------------
-
         # 查询未删除的 KB
         query = select(KnowledgeBase).where(KnowledgeBase.is_deleted == 0)
         
@@ -278,14 +245,19 @@ class KnowledgeBaseService:
         items = result.scalars().all()
 
         # 设置 is_synced (辅助字段,不存库)
-        milvus_names_set = set(milvus_service.client.list_collections())
-        for item in items:
-            c1_ok = item.collection_name_parent in milvus_names_set
-            c2_ok = True
-            if item.collection_name_children:
-                c2_ok = item.collection_name_children in milvus_names_set
-            
-            item.is_synced = c1_ok and c2_ok
+        try:
+            milvus_names_set = set(milvus_service.client.list_collections())
+            for item in items:
+                c1_ok = item.collection_name_parent in milvus_names_set
+                c2_ok = True
+                if item.collection_name_children:
+                    c2_ok = item.collection_name_children in milvus_names_set
+                
+                item.is_synced = c1_ok and c2_ok
+        except Exception as e:
+            print(f"Check Milvus sync status failed: {e}")
+            for item in items:
+                item.is_synced = False
         
         meta = PaginationSchema(
             page=page,
@@ -506,9 +478,10 @@ class KnowledgeBaseService:
         fields = [
             {"name": "pk", "type": "INT64", "is_primary": True, "description": "主键"},
             {"name": "text", "type": "VARCHAR", "max_length": 65535, "description": "内容"},
-            {"name": "vector", "type": "FLOAT_VECTOR", "description": "向量列"},
+            {"name": "dense", "type": "FLOAT_VECTOR", "description": "向量列"},
             {"name": "sparse", "type": "BM25", "description": "内容的BM25关键字检索"},
             {"name": "document_id", "type": "VARCHAR", "max_length": 128, "description": "样本中心上传文档ID"},
+            {"name": "kb_id", "type": "VARCHAR", "max_length": 128, "description": "知识库ID"},
             {"name": "parent_id", "type": "VARCHAR", "max_length": 128, "description": "父段ID"},
             {"name": "index", "type": "INT64", "description": "索引序号"},
             {"name": "tag_list", "type": "VARCHAR", "max_length": 2048, "description": "标签"},

+ 32 - 44
src/app/services/milvus_service.py

@@ -39,23 +39,23 @@ class MilvusService:
             self.ensure_collection_exists(name)
 
     async def insert_knowledge(self, content: str, doc_info: Dict[str, Any]):
-        """将 Markdown 内容切分并入库 (支持父子段分表)"""
+        """将 Markdown 内容切分并入库 (支持路由到明确的父子集合)"""
         try:
             doc_id = doc_info.get("doc_id")
             doc_name = doc_info.get("doc_name")
-            doc_version = doc_info.get("doc_version", int(time.time()))
-            tags = doc_info.get("tags", "")
-            user_id = doc_info.get("user_id", "system")
-            
             kb_method = doc_info.get("kb_method")
-            target_collection = doc_info.get("collection_name") or PARENT_COLLECTION_NAME
+            
+            # 获取明确的集合名称 (由业务层从数据库查出)
+            parent_col = doc_info.get("collection_name_parent") or PARENT_COLLECTION_NAME
+            child_col = doc_info.get("collection_name_children") or CHILD_COLLECTION_NAME
             
             from langchain_text_splitters import RecursiveCharacterTextSplitter
 
             if kb_method == "parent_child":
-                # --- 方案 A: 父子段分表入库 ---
-                parent_col = f"{target_collection}_parent"
-                child_col = f"{target_collection}_child"
+                # --- 方案 A: 父子段分表入库 (双写模式) ---
+                # 确保两个集合都存在
+                self.ensure_collection_exists(parent_col)
+                self.ensure_collection_exists(child_col)
                 
                 # 1. 切分父段 (较大块)
                 parent_splitter = RecursiveCharacterTextSplitter(
@@ -86,10 +86,6 @@ class MilvusService:
                         # 子段的 parent_id 指向父段的 p_id
                         c_metadata = self._prepare_metadata(doc_info, p_id, c_idx, p_id)
                         child_docs.append(Document(page_content=c_content, metadata=c_metadata))
-
-                # 确保两个集合都存在
-                self.ensure_collection_exists(parent_col)
-                self.ensure_collection_exists(child_col)
                 
                 # 分别入库
                 if parent_docs:
@@ -100,7 +96,7 @@ class MilvusService:
                 logger.info(f"Successfully inserted parent-child chunks for {doc_name}: {len(parent_docs)} parents -> {len(child_docs)} children")
             
             else:
-                # --- 常规单表入库逻辑 ---
+                # --- 方案 B: 常规单表入库 (只进子表,parent_id 设为空) ---
                 chunks = []
                 if kb_method == "length":
                     splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
@@ -113,6 +109,7 @@ class MilvusService:
                     )
                     chunks = splitter.split_text(content)
                 else:
+                    # 默认按双换行切分
                     chunks = [p.strip() for p in re.split(r"\n\s*\n+", content) if p.strip()]
                 
                 if not chunks:
@@ -122,13 +119,15 @@ class MilvusService:
                 documents = []
                 for idx, chunk in enumerate(chunks):
                     p_id = hashlib.sha1(f"{doc_id}_{idx}".encode()).hexdigest()
-                    metadata = self._prepare_metadata(doc_info, p_id, idx, p_id)
+                    # 对于单表模式,parent_id 设为空字符串
+                    metadata = self._prepare_metadata(doc_info, p_id, idx, "")
                     documents.append(Document(page_content=chunk, metadata=metadata))
 
-                self.ensure_collection_exists(target_collection)
-                get_milvus_vectorstore(target_collection).add_documents(documents)
+                # 确保子表集合存在
+                self.ensure_collection_exists(child_col)
+                get_milvus_vectorstore(child_col).add_documents(documents)
                 
-                logger.info(f"Successfully inserted {len(documents)} chunks for {doc_name} into {target_collection}")
+                logger.info(f"Successfully inserted {len(documents)} chunks for {doc_name} into {child_col} (kb_method: {kb_method})")
 
         except Exception as e:
             logger.error(f"Error inserting knowledge into Milvus: {e}")
@@ -141,9 +140,11 @@ class MilvusService:
         doc_version = doc_info.get("doc_version", int(time.time()))
         tags = doc_info.get("tags", "")
         user_id = doc_info.get("user_id", "system")
+        kb_id = doc_info.get("kb_id", "")
         
         return {
             "document_id": doc_id,
+            "kb_id": kb_id,
             "parent_id": parent_ref_id,
             "index": index,
             "tag_list": tags,
@@ -173,6 +174,7 @@ class MilvusService:
             schema.add_field("dense", DataType.FLOAT_VECTOR, dim=self.DENSE_DIM)
             schema.add_field("sparse", DataType.SPARSE_FLOAT_VECTOR)
             schema.add_field("document_id", DataType.VARCHAR, max_length=256)
+            schema.add_field("kb_id", DataType.VARCHAR, max_length=256)
             schema.add_field("parent_id", DataType.VARCHAR, max_length=256)
             schema.add_field("index", DataType.INT64)
             schema.add_field("tag_list", DataType.VARCHAR, max_length=2048)
@@ -225,22 +227,9 @@ class MilvusService:
             )
             needs_index = True
 
-        if "permission" in fields_in_collection and "permission" not in existing_indexes:
-            index_params.add_index(
-                field_name="permission",
-                index_type="INVERTED",
-                params={"json_cast_type": "VARCHAR"}
-            )
-            needs_index = True
+        # [Optimized] 移除对 JSON 字段的冗余索引创建逻辑,避免在 Milvus 2.4+ 环境下因缺少参数报错
+        # 同时确保 core index (dense/sparse) 命名与 create_collection 保持一致
         
-        if "metadata" in fields_in_collection and "metadata" not in existing_indexes:
-            index_params.add_index(
-                field_name="metadata",
-                index_type="INVERTED",
-                params={"json_cast_type": "VARCHAR"}
-            )
-            needs_index = True
-
         if needs_index:
             logger.info(f"Creating missing indexes for collection: {name}")
             try:
@@ -281,10 +270,10 @@ class MilvusService:
                 # 如果没有定义主键,添加默认主键
                 schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True, auto_id=True)
             
-            # 检查是否有默认向量列,如果没有则添加 (兼容旧逻辑,但如果fields里有vector则不添加)
+            # 检查是否有默认向量列,如果没有则添加 (兼容旧逻辑)
             has_vector = any(f.get("type") == "FLOAT_VECTOR" for f in fields)
             if not has_vector:
-                schema.add_field(field_name="vector", datatype=DataType.FLOAT_VECTOR, dim=dimension)
+                schema.add_field(field_name="dense", datatype=DataType.FLOAT_VECTOR, dim=dimension)
             
             # 3. 添加用户自定义字段
             type_map = {
@@ -359,15 +348,18 @@ class MilvusService:
             # 5. 为所有向量字段添加索引
             for f in fields:
                 ftype = f.get("type", "").upper()
+                fname = f.get("name")
                 if ftype == "FLOAT_VECTOR":
                     index_params.add_index(
-                        field_name=f.get("name"), 
+                        field_name=fname, 
+                        index_name=f"{fname}_idx", # 显式命名,如 dense_idx,确保与 ensure_collection_exists 一致
                         index_type="AUTOINDEX",
                         metric_type="IP" # [Modified] 更改为 IP (内积),通常对规范化向量效果更好,与 COSINE 类似但更简单
                     )
                 elif ftype == "BM25" or ftype == "SPARSE_FLOAT_VECTOR":
                     index_params.add_index(
-                        field_name=f.get("name"),
+                        field_name=fname,
+                        index_name="bm25_idx", # 显式命名,确保与 ensure_collection_exists 一致
                         index_type="SPARSE_INVERTED_INDEX", # 稀疏向量索引
                         metric_type="BM25"
                     )
@@ -382,13 +374,9 @@ class MilvusService:
                         index_type="INVERTED"
                     )
                 elif ftype == "JSON":
-                    # Milvus 2.4+ JSON 索引必须指定 json_cast_type
-                    # 这里为 JSON 字段添加默认索引,以便支持查询
-                    index_params.add_index(
-                        field_name=f.get("name"),
-                        index_type="INVERTED",
-                        params={"json_cast_type": "VARCHAR"}
-                    )
+                    # JSON 字段索引在某些环境下存在兼容性问题(如缺少 json_cast_type 报错),
+                    # 考虑到目前主要通过表达式过滤 JSON,且非核心性能瓶颈,暂时不自动创建 JSON 索引。
+                    pass
 
             # 7. 创建集合
             self.client.create_collection(

+ 101 - 47
src/app/services/sample_service.py

@@ -111,7 +111,7 @@ class SampleService:
         Args:
             doc_ids: 文档ID列表
             username: 操作人
-            kb_id: 知识库ID
+            kb_id: 知识库ID (可选,若不传则根据 source_type 自动匹配)
             kb_method: 切分方法
         """
         conn = get_db_connection()
@@ -129,7 +129,7 @@ class SampleService:
            # 1. 获取所有选中的文档详情
             placeholders = ','.join(['%s']*len(doc_ids))
             fetch_sql = f"""
-                SELECT id, title, source_type, md_url, conversion_status, whether_to_enter, created_time 
+                SELECT id, title, source_type, md_url, conversion_status, whether_to_enter, created_time, kb_id 
                 FROM t_samp_document_main 
                 WHERE id IN ({placeholders})
             """
@@ -146,6 +146,7 @@ class SampleService:
                 status = doc.get('conversion_status')
                 whether_to_enter = doc.get('whether_to_enter', 0)
                 md_url = doc.get('md_url')
+                source_type = doc.get('source_type')
                 
                 # A. 检查是否已入库
                 if whether_to_enter == 1:
@@ -168,7 +169,38 @@ class SampleService:
                     error_details.append(f"· {title}: 转换结果地址丢失")
                     continue
                 
-                # B. 从 MinIO 获取 Markdown 内容
+                # C. 确定入库策略 (严格使用弹窗传入的参数)
+                # 不从数据库读取旧的 kb_method,保证入库逻辑由本次操作决定
+                current_kb_id = kb_id or doc.get('kb_id')
+                current_kb_method = kb_method  # 直接使用前端传来的切分方式
+
+                if not current_kb_id:
+                    logger.warning(f"文档 {title}({doc_id}) 未指定知识库,跳过入库")
+                    failed_count += 1
+                    error_details.append(f"· {title}: 未指定目标知识库")
+                    continue
+
+                if not current_kb_method:
+                    logger.warning(f"文档 {title}({doc_id}) 未指定切分方式,跳过入库")
+                    failed_count += 1
+                    error_details.append(f"· {title}: 未指定切分策略")
+                    continue
+
+                # 获取知识库信息 (collection_name_parent, collection_name_children)
+                kb_sql = "SELECT collection_name_parent, collection_name_children FROM t_samp_knowledge_base WHERE id = %s AND is_deleted = 0"
+                cursor.execute(kb_sql, (current_kb_id,))
+                kb_res = cursor.fetchone()
+                
+                if not kb_res:
+                    logger.warning(f"找不到指定的知识库: id={current_kb_id}")
+                    failed_count += 1
+                    error_details.append(f"· {title}: 指定的知识库不存在或已被删除")
+                    continue
+                
+                collection_name_parent = kb_res['collection_name_parent']
+                collection_name_children = kb_res['collection_name_children']
+                
+                # D. 从 MinIO 获取 Markdown 内容
                 try:
                     md_content = self.minio_manager.get_object_content(md_url)
                     if not md_content:
@@ -179,39 +211,32 @@ class SampleService:
                     error_details.append(f"· {title}: 读取云端文件失败")
                     continue
                 
-                # C. 调用 MilvusService 进行切分和入库
+                # E. 调用 MilvusService 进行切分和入库
                 try:
-                    # 如果有 kb_id,需要根据它获取 collection_name
-                    collection_name = None
-                    if kb_id:
-                        kb_sql = "SELECT collection_name FROM t_samp_knowledge_base WHERE id = %s"
-                        cursor.execute(kb_sql, (kb_id,))
-                        kb_res = cursor.fetchone()
-                        if kb_res:
-                            collection_name = kb_res['collection_name']
-                    
                     # 准备元数据
+                    current_date = int(datetime.now().strftime('%Y%m%d'))
                     doc_info = {
                         "doc_id": doc_id,
                         "doc_name": title,
-                        "doc_version": int(doc['created_time'].strftime('%Y%m%d')) if doc.get('created_time') else 20260127,
-                        "tags": doc.get('source_type') or 'unknown',
+                        "doc_version": int(doc['created_time'].strftime('%Y%m%d')) if doc.get('created_time') else current_date,
+                        "tags": source_type or 'unknown',
                         "user_id": username,  # 传递操作人作为 created_by
-                        "kb_id": kb_id,
-                        "kb_method": kb_method,
-                        "collection_name": collection_name
+                        "kb_id": current_kb_id,
+                        "kb_method": current_kb_method,
+                        "collection_name_parent": collection_name_parent,
+                        "collection_name_children": collection_name_children
                     }
                     await self.milvus_service.insert_knowledge(md_content, doc_info)
                     
-                    # D. 添加到任务管理中心 (类型为 data)
+                    # F. 添加到任务管理中心 (类型为 data)
                     try:
                         await task_service.add_task(doc_id, 'data')
                     except Exception as task_err:
                         logger.error(f"添加文档 {title} 到任务中心失败: {task_err}")
 
-                    # E. 更新数据库状态
+                    # G. 更新数据库状态
                     update_sql = "UPDATE t_samp_document_main SET whether_to_enter = 1, kb_id = %s, kb_method = %s, updated_by = %s, updated_time = NOW() WHERE id = %s"
-                    cursor.execute(update_sql, (kb_id, kb_method, username, doc_id))
+                    cursor.execute(update_sql, (current_kb_id, current_kb_method, username, doc_id))
                     success_count += 1
                     
                 except Exception as milvus_err:
@@ -385,8 +410,9 @@ class SampleService:
                     LEFT JOIN {sub_table} s ON m.id = s.id
                     LEFT JOIN t_sys_user u1 ON m.created_by = u1.id
                     LEFT JOIN t_sys_user u2 ON m.updated_by = u2.id
+                    LEFT JOIN t_samp_knowledge_base kb ON m.kb_id = kb.id
                 """
-                fields_sql = "m.*, s.*, u1.username as creator_name, u2.username as updater_name, m.id as id"
+                fields_sql = "m.*, s.*, u1.username as creator_name, u2.username as updater_name, kb.name as kb_name, m.id as id"
                 where_clauses.append("m.source_type = %s")
                 params.append(table_type)
                 order_sql = "m.created_time DESC"
@@ -407,8 +433,8 @@ class SampleService:
                         where_clauses.append("s.level_4_classification = %s")
                         params.append(level_4_classification)
             else:
-                from_sql = "t_samp_document_main m LEFT JOIN t_sys_user u1 ON m.created_by = u1.id LEFT JOIN t_sys_user u2 ON m.updated_by = u2.id"
-                fields_sql = "m.*, u1.username as creator_name, u2.username as updater_name"
+                from_sql = "t_samp_document_main m LEFT JOIN t_sys_user u1 ON m.created_by = u1.id LEFT JOIN t_sys_user u2 ON m.updated_by = u2.id LEFT JOIN t_samp_knowledge_base kb ON m.kb_id = kb.id"
+                fields_sql = "m.*, u1.username as creator_name, u2.username as updater_name, kb.name as kb_name"
                 order_sql = "m.created_time DESC"
                 title_field = "m.title"
             
@@ -431,7 +457,6 @@ class SampleService:
             sql = f"SELECT {fields_sql} FROM {from_sql} {where_sql} ORDER BY {order_sql} LIMIT %s OFFSET %s"
             params.extend([size, offset])
             
-            logger.info(f"Executing SQL: {sql} with params: {params}")
             cursor.execute(sql, tuple(params))
             items = [self._format_document_row(row) for row in cursor.fetchall()]
             
@@ -546,12 +571,13 @@ class SampleService:
                 INSERT INTO t_samp_document_main (
                     id, title, source_type, file_url, 
                     file_extension, created_by, updated_by, created_time, updated_time,
-                    conversion_status, whether_to_task
-                ) VALUES (%s, %s, %s, %s, %s, %s, %s, NOW(), NOW(), 0, 0)
+                    conversion_status, whether_to_task, kb_id
+                ) VALUES (%s, %s, %s, %s, %s, %s, %s, NOW(), NOW(), 0, 0, %s)
                 """,
                 (
                     doc_id, doc_data.get('title'), table_type, file_url,
-                    doc_data.get('file_extension'), user_id, user_id
+                    doc_data.get('file_extension'), user_id, user_id,
+                    doc_data.get('kb_id')
                 )
             )
 
@@ -648,14 +674,14 @@ class SampleService:
             # 1. 更新主表
             cursor.execute(
                 """
-                UPDATE t_samp_document_main 
-                SET title = %s, file_url = %s, file_extension = %s,
-                    updated_by = %s, updated_time = NOW()
+                UPDATE t_samp_document_main SET 
+                    title = %s, file_url = %s, file_extension = %s, 
+                    updated_by = %s, updated_time = NOW(), kb_id = %s
                 WHERE id = %s
                 """,
                 (
                     doc_data.get('title'), file_url, doc_data.get('file_extension'),
-                    updater_id, doc_id
+                    updater_id, doc_data.get('kb_id'), doc_id
                 )
             )
 
@@ -754,7 +780,7 @@ class SampleService:
                     s.participating_units, s.reference_basis,
                     s.created_by, u1.username as creator_name, s.created_time,
                     s.updated_by, u2.username as updater_name, s.updated_time,
-                    m.file_url, m.conversion_status, m.md_url, m.json_url
+                    m.file_url, m.conversion_status, m.md_url, m.json_url, m.kb_id, m.whether_to_enter
                 """
                 field_map = {
                     'title': 's.chinese_name',
@@ -778,7 +804,7 @@ class SampleService:
                     s.note, 
                     s.created_by, u1.username as creator_name, s.created_time,
                     s.updated_by, u2.username as updater_name, s.updated_time,
-                    m.file_url, m.conversion_status, m.md_url, m.json_url
+                    m.file_url, m.conversion_status, m.md_url, m.json_url, m.kb_id, m.whether_to_enter
                 """
                 field_map = {
                     'title': 's.plan_name',
@@ -799,7 +825,7 @@ class SampleService:
                     s.note, 
                     s.created_by, u1.username as creator_name, s.created_time,
                     s.updated_by, u2.username as updater_name, s.updated_time,
-                    m.file_url, m.conversion_status, m.md_url, m.json_url
+                    m.file_url, m.conversion_status, m.md_url, m.json_url, m.kb_id, m.whether_to_enter
                 """
                 field_map = {
                     'title': 's.file_name',
@@ -860,11 +886,12 @@ class SampleService:
             
             # 使用 LEFT JOIN 关联主表和用户表获取姓名
             sql = f"""
-                SELECT {fields} 
+                SELECT {fields}, kb.name as kb_name
                 FROM {table_name} s
                 LEFT JOIN t_samp_document_main m ON s.id = m.id
                 LEFT JOIN t_sys_user u1 ON s.created_by = u1.id
                 LEFT JOIN t_sys_user u2 ON s.updated_by = u2.id
+                LEFT JOIN t_samp_knowledge_base kb ON m.kb_id = kb.id
                 {where_sql} 
                 ORDER BY s.created_time DESC 
                 LIMIT %s OFFSET %s
@@ -1008,12 +1035,12 @@ class SampleService:
                 INSERT INTO t_samp_document_main (
                     id, title, source_type, file_url, 
                     file_extension, created_by, updated_by, created_time, updated_time,
-                    conversion_status, whether_to_task
-                ) VALUES (%s, %s, %s, %s, %s, %s, %s, NOW(), NOW(), 0, 0)
+                    conversion_status, whether_to_task, kb_id
+                ) VALUES (%s, %s, %s, %s, %s, %s, %s, NOW(), NOW(), 0, 0, %s)
                 """,
                 (
                     doc_id, data.get('title'), type, file_url,
-                    file_extension, user_id, user_id
+                    file_extension, user_id, user_id, data.get('kb_id')
                 )
             )
             
@@ -1122,10 +1149,10 @@ class SampleService:
             cursor.execute(
                 """
                 UPDATE t_samp_document_main 
-                SET title = %s, file_url = %s, file_extension = %s, updated_by = %s, updated_time = NOW()
+                SET title = %s, file_url = %s, file_extension = %s, updated_by = %s, updated_time = NOW(), kb_id = %s
                 WHERE id = %s
                 """,
-                (data.get('title'), file_url, file_extension, updater_id, doc_id)
+                (data.get('title'), file_url, file_extension, updater_id, data.get('kb_id'), doc_id)
             )
 
             # 2. 更新子表 (移除 file_url)
@@ -1200,6 +1227,10 @@ class SampleService:
 
     async def delete_basic_info(self, type: str, doc_id: str) -> Tuple[bool, str]:
         """删除基本信息"""
+        if not doc_id:
+            return False, "缺少 ID 参数"
+            
+        logger.info(f"Deleting basic info: type={type}, id={doc_id}")
         conn = get_db_connection()
         if not conn:
             return False, "数据库连接失败"
@@ -1210,21 +1241,44 @@ class SampleService:
             if not table_name:
                 return False, "无效的类型"
             
-            # 1. 删除主表记录 (由于设置了 ON DELETE CASCADE,子表记录会自动删除)
+            # 1. 显式删除子表记录 (防止 CASCADE 未生效)
+            try:
+                cursor.execute(f"DELETE FROM {table_name} WHERE id = %s", (doc_id,))
+                logger.info(f"Deleted from sub-table {table_name}, affected: {cursor.rowcount}")
+            except Exception as sub_e:
+                logger.warning(f"删除子表 {table_name} 记录失败 (可能不存在): {sub_e}")
+
+            # 2. 同步删除任务管理中心的数据 (优先删除关联数据)
+            try:
+                # 使用当前事务删除任务记录(如果 task_service 支持的话,目前它自建连接)
+                # 这里我们直接在当前 cursor 中也执行一次,确保事务一致性
+                cursor.execute("DELETE FROM t_task_management WHERE business_id = %s", (doc_id,))
+                logger.info(f"Deleted from t_task_management, affected: {cursor.rowcount}")
+            except Exception as task_e:
+                logger.warning(f"在主事务中删除任务记录失败: {task_e}")
+
+            # 3. 删除主表记录
             cursor.execute("DELETE FROM t_samp_document_main WHERE id = %s", (doc_id,))
+            affected_main = cursor.rowcount
+            logger.info(f"Deleted from t_samp_document_main, affected: {affected_main}")
             
-            # 同步删除任务管理中心的数据
+            if affected_main == 0:
+                logger.warning(f"未找到主表记录: {doc_id}")
+                # 即使主表没找到,我们也 commit 之前的操作并返回成功(幂等性)
+            
+            conn.commit()
+            
+            # 4. 再次确保任务中心数据已删除 (调用原有服务)
             try:
                 await task_service.delete_task(doc_id)
             except Exception as task_err:
-                logger.error(f"同步删除任务中心数据失败 (ID: {doc_id}): {task_err}")
+                logger.error(f"调用 task_service 删除任务失败: {task_err}")
 
-            conn.commit()
             return True, "删除成功"
         except Exception as e:
-            logger.exception("删除基本信息失败")
+            logger.exception(f"删除基本信息异常 (ID: {doc_id})")
             conn.rollback()
-            return False, str(e)
+            return False, f"删除失败: {str(e)}"
         finally:
             cursor.close()
             conn.close()

+ 5 - 1
src/app/services/search_engine_service.py

@@ -38,11 +38,15 @@ class SearchEngineService:
         from sqlalchemy import text
         try:
             # 简单判断是否是 UUID 格式或数字 ID,尝试查询数据库
-            kb_query = text("SELECT collection_name FROM t_samp_knowledge_base WHERE id = :kb_id OR collection_name = :kb_id")
+            # 修改 collection_name 为 collection_name_parent,并增加对 children 的兼容
+            kb_query = text("SELECT collection_name_parent FROM t_samp_knowledge_base WHERE id = :kb_id OR collection_name_parent = :kb_id OR collection_name_children = :kb_id")
             kb_res = await db.execute(kb_query, {"kb_id": original_kb_id})
             kb_row = kb_res.fetchone()
             if kb_row:
                 collection_name = kb_row[0]
+                # 如果是 parent_child 模式,剥离 _parent 后缀供后面拼接
+                if collection_name and collection_name.endswith('_parent'):
+                    collection_name = collection_name[:-7]
                 logging.info(f"Resolved kb_id {original_kb_id} to collection_name: {collection_name}")
         except Exception as db_err:
             logging.warning(f"Failed to resolve kb_id {original_kb_id} from database: {db_err}")

+ 53 - 121
src/app/services/system_service.py

@@ -265,10 +265,6 @@ class SystemService:
             
             menus = []
             for row in menu_rows:
-                # 过滤掉不想要的菜单
-                if "文档处理中心" in str(row['title']):
-                    continue
-                    
                 menu = {
                     "id": row['id'],
                     "parent_id": row['parent_id'],
@@ -285,8 +281,8 @@ class SystemService:
                 }
                 menus.append(menu)
             
-            # 构建菜单树前,过滤掉 button 类型的项,侧边栏只显示 parent 和 menu 类型
-            sidebar_menus = [m for m in menus if m.get("menu_type") in ["parent", "menu"]]
+            # 构建菜单树前,过滤掉 button 类型的项,侧边栏不显示按钮
+            sidebar_menus = [m for m in menus if m.get("menu_type") != "button"]
             menu_tree = self._build_menu_tree(sidebar_menus)
             
             return menu_tree
@@ -317,25 +313,19 @@ class SystemService:
             cursor.execute(f"SELECT COUNT(*) as count FROM t_sys_menu m WHERE {where_clause}", params)
             total = cursor.fetchone()['count']
             
-            # 查询菜单列表,包含创建人和修改人信息
+            # 查询菜单列表
             cursor.execute(f"""
                 SELECT m.id, m.parent_id, m.name, m.title, m.path, m.component,
                        m.icon, m.sort_order, m.menu_type, m.is_hidden, m.is_active,
-                       m.description, m.created_time, m.updated_time, m.created_by, m.updated_by,
-                       pm.title as parent_title,
-                       cu.username as created_by_name,
-                       uu.username as updated_by_name
+                       m.description, m.created_time, m.updated_time,
+                       pm.title as parent_title
                 FROM t_sys_menu m
                 LEFT JOIN t_sys_menu pm ON m.parent_id = pm.id
-                LEFT JOIN t_sys_user cu ON m.created_by = cu.id
-                LEFT JOIN t_sys_user uu ON m.updated_by = uu.id
                 WHERE {where_clause}
                 ORDER BY 
                     CASE WHEN m.parent_id IS NULL THEN 0 ELSE 1 END,
                     m.sort_order, 
-                    CASE WHEN m.menu_type = 'parent' THEN 0 
-                         WHEN m.menu_type = 'menu' THEN 1 
-                         ELSE 2 END,
+                    CASE WHEN m.menu_type = 'menu' THEN 0 ELSE 1 END,
                     m.created_time
                 LIMIT %s OFFSET %s
             """, params + [page_size, (page - 1) * page_size])
@@ -357,10 +347,6 @@ class SystemService:
                     "description": row['description'],
                     "created_time": row['created_time'].isoformat() if row['created_time'] else None,
                     "updated_time": row['updated_time'].isoformat() if row['updated_time'] else None,
-                    "created_by": row['created_by'],
-                    "updated_by": row['updated_by'],
-                    "created_by_name": row['created_by_name'],
-                    "updated_by_name": row['updated_by_name'],
                     "parent_title": row['parent_title']
                 }
                 menus.append(menu)
@@ -771,37 +757,27 @@ class SystemService:
         if not menu_ids:
             return []
         
-        # 获取所有相关菜单的信息
-        placeholders = ','.join(['%s'] * len(menu_ids))
-        cursor.execute(f"""
-            SELECT id, parent_id FROM t_sys_menu 
-            WHERE id IN ({placeholders}) AND is_active = 1
-        """, menu_ids)
-        
-        menus = cursor.fetchall()
         result_menu_ids = set(menu_ids)
+        current_ids = set(menu_ids)
         
-        # 递归添加父菜单
-        def add_parent_menu(menu_id: str):
-            # 查找父菜单
-            cursor.execute("""
-                SELECT parent_id FROM t_sys_menu 
-                WHERE id = %s AND is_active = 1
-            """, (menu_id,))
-            
-            parent_result = cursor.fetchone()
-            if parent_result and parent_result['parent_id']:
-                parent_id = parent_result['parent_id']
-                if parent_id not in result_menu_ids:
-                    result_menu_ids.add(parent_id)
-                    # 递归添加父菜单的父菜单
-                    add_parent_menu(parent_id)
-        
-        # 为每个选中的菜单添加其父菜单
-        for menu in menus:
-            if menu['parent_id']:
-                add_parent_menu(menu['id'])
-        
+        while current_ids:
+            # 一次性查询当前层级所有菜单的 parent_id
+            placeholders = ','.join(['%s'] * len(current_ids))
+            cursor.execute(f"""
+                SELECT DISTINCT parent_id FROM t_sys_menu 
+                WHERE id IN ({placeholders}) AND parent_id IS NOT NULL AND parent_id != ''
+            """, list(current_ids))
+            
+            parents = {row['parent_id'] for row in cursor.fetchall()}
+            
+            # 找出尚未在结果集中的父 ID
+            new_parents = parents - result_menu_ids
+            if not new_parents:
+                break
+                
+            result_menu_ids.update(new_parents)
+            current_ids = new_parents
+            
         return list(result_menu_ids)
     
     # ==================== 菜单管理 ====================
@@ -820,20 +796,6 @@ class SystemService:
             if cursor.fetchone():
                 return False, "菜单标识已存在"
             
-            # 验证菜单类型
-            menu_type = menu_data.get('menu_type', 'menu')
-            if menu_type not in ['parent', 'menu', 'button']:
-                return False, "无效的菜单类型,只支持:parent(父菜单)、menu(菜单项)、button(功能按钮)"
-            
-            # 父菜单类型验证:不应该有路径和组件
-            if menu_type == 'parent':
-                if menu_data.get('path') or menu_data.get('component'):
-                    return False, "父菜单不应该配置路由路径和组件路径"
-            
-            # 功能按钮类型验证:必须有父菜单
-            if menu_type == 'button' and not menu_data.get('parent_id'):
-                return False, "功能按钮必须指定父菜单"
-            
             # 创建菜单
             menu_id = str(uuid.uuid4())
             cursor.execute("""
@@ -842,10 +804,8 @@ class SystemService:
                 VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW(), NOW())
             """, (
                 menu_id, menu_data.get('parent_id'), menu_data['name'], menu_data['title'],
-                menu_data.get('path') if menu_type != 'parent' else None, 
-                menu_data.get('component') if menu_type != 'parent' else None, 
-                menu_data.get('icon'),
-                menu_data.get('sort_order', 0), menu_type,
+                menu_data.get('path'), menu_data.get('component'), menu_data.get('icon'),
+                menu_data.get('sort_order', 0), menu_data.get('menu_type', 'menu'),
                 menu_data.get('is_hidden', False), menu_data.get('is_active', True),
                 menu_data.get('description'), creator_id
             ))
@@ -869,24 +829,6 @@ class SystemService:
         cursor = conn.cursor()
         
         try:
-            # 验证菜单类型
-            if 'menu_type' in menu_data:
-                menu_type = menu_data['menu_type']
-                if menu_type not in ['parent', 'menu', 'button']:
-                    return False, "无效的菜单类型,只支持:parent(父菜单)、menu(菜单项)、button(功能按钮)"
-                
-                # 父菜单类型验证:不应该有路径和组件
-                if menu_type == 'parent':
-                    if menu_data.get('path') or menu_data.get('component'):
-                        return False, "父菜单不应该配置路由路径和组件路径"
-                    # 清空路径和组件
-                    menu_data['path'] = None
-                    menu_data['component'] = None
-                
-                # 功能按钮类型验证:必须有父菜单
-                if menu_type == 'button' and 'parent_id' in menu_data and not menu_data.get('parent_id'):
-                    return False, "功能按钮必须指定父菜单"
-            
             # 更新菜单
             update_fields = []
             update_values = []
@@ -924,37 +866,20 @@ class SystemService:
         cursor = conn.cursor()
         
         try:
-            # 检查菜单是否存在
-            cursor.execute("SELECT id, title FROM t_sys_menu WHERE id = %s", (menu_id,))
-            menu = cursor.fetchone()
-            if not menu:
-                return False, "菜单不存在"
-            
             # 检查是否有子菜单
             cursor.execute("SELECT COUNT(*) as count FROM t_sys_menu WHERE parent_id = %s", (menu_id,))
-            child_count = cursor.fetchone()['count']
-            if child_count > 0:
-                return False, "该菜单下还有子菜单,请先删除子菜单"
+            if cursor.fetchone()['count'] > 0:
+                return False, "该菜单下有子菜单,无法删除"
             
-            # 开始事务
-            cursor.execute("START TRANSACTION")
+            # 删除菜单相关数据
+            cursor.execute("DELETE FROM t_sys_role_menu WHERE menu_id = %s", (menu_id,))
+            cursor.execute("DELETE FROM t_sys_menu WHERE id = %s", (menu_id,))
             
-            try:
-                # 删除角色菜单关联
-                cursor.execute("DELETE FROM t_sys_role_menu WHERE menu_id = %s", (menu_id,))
-                
-                # 删除菜单
-                cursor.execute("DELETE FROM t_sys_menu WHERE id = %s", (menu_id,))
-                
-                # 提交事务
-                conn.commit()
-                return True, "菜单删除成功"
-            except Exception as e:
-                # 回滚事务
-                conn.rollback()
-                raise e
+            conn.commit()
+            return True, "菜单删除成功"
         except Exception as e:
             logger.exception("删除菜单错误")
+            conn.rollback()
             return False, "服务器内部错误"
         finally:
             cursor.close()
@@ -963,32 +888,39 @@ class SystemService:
     # ==================== 辅助方法 ====================
     
     def _build_menu_tree(self, menus: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
-        """构建菜单树结构"""
-        menu_map = {menu["id"]: menu for menu in menus}
+        """构建菜单树结构 (优化版:支持无限层级且不依赖输入顺序)"""
+        # 1. 预处理:创建映射并重置 children
+        menu_map = {}
+        for menu in menus:
+            menu["children"] = []
+            menu_map[menu["id"]] = menu
+            
         tree = []
         
+        # 2. 构建树形结构
         for menu in menus:
             parent_id = menu.get("parent_id")
-            if not parent_id:  # Handles None and ''
+            # 只有当 parent_id 为空、'0' 或空字符串时才作为根节点
+            if not parent_id or parent_id == '0' or parent_id == '':
                 tree.append(menu)
             else:
                 parent = menu_map.get(parent_id)
                 if parent:
-                    if "children" not in parent:
-                        parent["children"] = []
                     parent["children"].append(menu)
                 else:
-                    # 如果找不到父菜单,也作为根菜单显示,防止菜单丢失
+                    # 如果有父 ID 但找不到父对象,说明数据集不完整
+                    # 为了不让菜单完全消失,作为根显示并报警
+                    logger.warning(f"Menu {menu['title']}({menu['id']}) has parent_id {parent_id} but parent not found.")
                     tree.append(menu)
         
-        # 递归排序子菜单
-        def sort_children(nodes):
+        # 3. 递归排序所有层级
+        def sort_nodes(nodes):
             nodes.sort(key=lambda x: x.get("sort_order", 0))
             for node in nodes:
-                if "children" in node and node["children"]:
-                    sort_children(node["children"])
+                if node.get("children"):
+                    sort_nodes(node["children"])
         
-        sort_children(tree)
+        sort_nodes(tree)
         return tree
     
     def _hash_password(self, password: str) -> str:

+ 4 - 3
src/views/knowledge_base_view.py

@@ -22,7 +22,7 @@ security = HTTPBearer()
 @router.get("", response_model=PaginatedResponseSchema)
 async def get_knowledge_bases(
     page: int = Query(1, ge=1, description="页码"),
-    page_size: int = Query(10, ge=1, le=100, description="每页数量"),
+    page_size: int = Query(10, ge=1, le=1000, description="每页数量"),
     keyword: str = Query(None, description="搜索关键词"),
     status: str = Query(None, description="状态筛选"),
     db: AsyncSession = Depends(get_db),
@@ -55,11 +55,12 @@ async def get_knowledge_base_simple_list(
     if not payload:
         return ResponseSchema(code=401, message="无效的访问令牌")
 
-    items, _ = await knowledge_base_service.get_list(db, page=1, page_size=1000)
+    # 只获取状态正常的知识库
+    items, _ = await knowledge_base_service.get_list(db, page=1, page_size=1000, status="normal")
     return ResponseSchema(
         code=0,
         message="获取成功",
-        data=[{"id": item.id, "name": item.name, "collection_name": item.collection_name} for item in items]
+        data=[{"id": item.id, "name": item.name, "collection_name": item.collection_name_parent} for item in items]
     )
 
 @router.post("", response_model=ResponseSchema)

+ 21 - 8
src/views/sample_view.py

@@ -228,7 +228,10 @@ async def batch_enter_knowledge_base(req: BatchEnterRequest, credentials: HTTPAu
         if not payload or not payload.get("is_superuser"):
             return ApiResponse(code=403, message="权限不足", timestamp=datetime.now(timezone.utc).isoformat()).model_dump()
         
-        username = payload.get("sub", "admin")
+        username = payload.get("sub")
+        if not username:
+            return ApiResponse(code=401, message="令牌中缺少用户信息", timestamp=datetime.now(timezone.utc).isoformat()).model_dump()
+        
         sample_service = SampleService()
         
         affected_rows, message = await sample_service.batch_enter_knowledge_base(
@@ -271,7 +274,10 @@ async def batch_add_to_task(req: BatchDeleteRequest, credentials: HTTPAuthorizat
         if not payload or not payload.get("is_superuser"):
             return ApiResponse(code=403, message="权限不足", timestamp=datetime.now(timezone.utc).isoformat()).model_dump()
         
-        user_id = payload.get("sub", "admin")
+        user_id = payload.get("sub")
+        if not user_id:
+            return ApiResponse(code=401, message="令牌中缺少用户信息", timestamp=datetime.now(timezone.utc).isoformat()).model_dump()
+        
         username = payload.get("username", user_id)
         
         sample_service = SampleService()
@@ -341,7 +347,10 @@ async def add_document(doc: DocumentAdd, credentials: HTTPAuthorizationCredentia
         if not payload:
             return ApiResponse(code=401, message="无效的访问令牌", timestamp=datetime.now(timezone.utc).isoformat()).model_dump()
         
-        user_id = payload.get("sub", "admin")
+        user_id = payload.get("sub")
+        if not user_id:
+            return ApiResponse(code=401, message="令牌中缺少用户信息", timestamp=datetime.now(timezone.utc).isoformat()).model_dump()
+        
         sample_service = SampleService()
         
         # 将 DocumentAdd 对象转换为字典,包含所有字段
@@ -360,7 +369,6 @@ async def add_document(doc: DocumentAdd, credentials: HTTPAuthorizationCredentia
 @router.get("/documents/detail/{doc_id}")
 async def get_document_detail(doc_id: str, credentials: HTTPAuthorizationCredentials = Depends(security)):
     """获取文档详情 (关联查询子表)"""
-    logger.info(f"正在获取文档详情: {doc_id}")
     try:
         payload = verify_token(credentials.credentials)
         if not payload:
@@ -370,10 +378,8 @@ async def get_document_detail(doc_id: str, credentials: HTTPAuthorizationCredent
         doc = await sample_service.get_document_detail(doc_id)
         
         if not doc:
-            logger.warning(f"文档不存在: {doc_id}")
             return ApiResponse(code=404, message="文档不存在", timestamp=datetime.now(timezone.utc).isoformat()).model_dump()
         
-        logger.info(f"找到文档数据: {doc.get('title')}")
         return ApiResponse(code=0, message="获取详情成功", data=doc, timestamp=datetime.now(timezone.utc).isoformat()).model_dump()
         
     except Exception as e:
@@ -571,7 +577,10 @@ async def add_basic_info(type: str, data: dict, credentials: HTTPAuthorizationCr
         if not payload:
             return ApiResponse(code=401, message="无效的访问令牌", timestamp=datetime.now(timezone.utc).isoformat()).model_dump()
         
-        user_id = payload.get("sub", "admin")
+        user_id = payload.get("sub")
+        if not user_id:
+            return ApiResponse(code=401, message="令牌中缺少用户信息", timestamp=datetime.now(timezone.utc).isoformat()).model_dump()
+        
         sample_service = SampleService()
         success, message, doc_id = await sample_service.add_basic_info(type, data, user_id)
         
@@ -591,8 +600,12 @@ async def edit_basic_info(type: str, id: str, data: dict, credentials: HTTPAutho
         if not payload:
             return ApiResponse(code=401, message="无效的访问令牌", timestamp=datetime.now(timezone.utc).isoformat()).model_dump()
         
+        user_id = payload.get("sub")
+        if not user_id:
+            return ApiResponse(code=401, message="令牌中缺少用户信息", timestamp=datetime.now(timezone.utc).isoformat()).model_dump()
+            
         sample_service = SampleService()
-        success, message = await sample_service.edit_basic_info(type, id, data, payload.get("sub", "admin"))
+        success, message = await sample_service.edit_basic_info(type, id, data, user_id)
         
         if success:
             return ApiResponse(code=0, message=message, timestamp=datetime.now(timezone.utc).isoformat()).model_dump()

+ 1 - 1
src/views/search_engine_view.py

@@ -40,7 +40,7 @@ async def search_knowledge_base(
 @router.get("", response_model=PaginatedResponseSchema)
 async def get_search_engines(
     page: int = Query(1, ge=1, description="页码"),
-    page_size: int = Query(10, ge=1, le=100, description="每页数量"),
+    page_size: int = Query(10, ge=1, le=1000, description="每页数量"),
     keyword: str = Query(None, description="搜索关键词"),
     status: str = Query(None, description="状态筛选"),
     db: AsyncSession = Depends(get_db),

+ 1 - 1
src/views/tag_view.py

@@ -72,7 +72,7 @@ async def list_tag_categories(
     parent_id: int = Query(None, description="父级分类ID"),
     status: int = Query(None, description="状态筛选"),
     page: int = Query(1, ge=1, description="页码"),
-    page_size: int = Query(10, ge=1, le=100, description="每页数量"),
+    page_size: int = Query(10, ge=1, le=1000, description="每页数量"),
     session: AsyncSession = Depends(get_db)
 ):
     """列表查询标签分类"""

+ 0 - 54
test_datetime_format.html

@@ -1,54 +0,0 @@
-<!DOCTYPE html>
-<html>
-<head>
-    <title>DateTime Format Test</title>
-</head>
-<body>
-    <h1>DateTime Format Test</h1>
-    <div id="results"></div>
-
-    <script>
-        // 格式化日期时间函数
-        const formatDateTime = (dateTime) => {
-            if (!dateTime) return '-'
-            const date = new Date(dateTime)
-            
-            // 格式化为 YYYY-MM-DD HH:mm:ss
-            const year = date.getFullYear()
-            const month = String(date.getMonth() + 1).padStart(2, '0')
-            const day = String(date.getDate()).padStart(2, '0')
-            const hours = String(date.getHours()).padStart(2, '0')
-            const minutes = String(date.getMinutes()).padStart(2, '0')
-            const seconds = String(date.getSeconds()).padStart(2, '0')
-            
-            return `${year}-${month}-${day} ${hours}:${minutes}:${seconds}`
-        }
-
-        // 测试数据
-        const testDates = [
-            '2026-01-06T17:46:31.000Z',
-            '2026-01-26T10:30:15.123Z',
-            '2025-12-25T23:59:59.999Z',
-            null,
-            undefined,
-            ''
-        ]
-
-        const resultsDiv = document.getElementById('results')
-        
-        testDates.forEach((date, index) => {
-            const formatted = formatDateTime(date)
-            const p = document.createElement('p')
-            p.innerHTML = `<strong>Test ${index + 1}:</strong> Input: ${date} → Output: ${formatted}`
-            resultsDiv.appendChild(p)
-        })
-
-        // 期望的输出格式示例
-        const expectedP = document.createElement('p')
-        expectedP.innerHTML = '<strong>Expected format:</strong> 2026-01-06 17:46:31'
-        expectedP.style.color = 'green'
-        expectedP.style.fontWeight = 'bold'
-        resultsDiv.appendChild(expectedP)
-    </script>
-</body>
-</html>

Some files were not shown because too many files changed in this diff