瀏覽代碼

配置文件更新

lingmin_package@163.com 1 月之前
父節點
當前提交
f7197ff4e4

+ 1 - 0
README_deploy.md

@@ -22,6 +22,7 @@
 
 
 #### 定义容器共享网络规范
+  - 查看目前的网络 docker network list
   - 创建自定义网络  docker network create lq_network
   -  重新创建网络(如果需要)
     - 删除自定义网络      docker network rm lq_network

+ 1 - 1
dev/app/LQAdminFrontServer/admin_front_deploy.sh

@@ -804,4 +804,4 @@ main() {
 }
 
 # 脚本入口
-main "$@"
+main "$@"

+ 8 - 3
dev/app/LQAdminServer/config.ini

@@ -38,7 +38,7 @@ JWT_SECRET_KEY=dev-jwt-secret-key-change-in-production-12345678901234567890
 
 
 # 后台管理Token配置
-ADMIN_TOKEN_EXPIRE_MINUTES=60
+ADMIN_TOKEN_EXPIRE_MINUTES=10
 ADMIN_REFRESH_TOKEN_EXPIRE_HOURS=24
 
 # OAuth2配置
@@ -100,7 +100,8 @@ FILE_BASE_URL=http://192.168.91.15:19000/aidata/sampledata
 # MinERU 配置
 MINERU_ACCESS_KEY=
 MINERU_SECRET_KEY=
-MINERU_TOKEN=eyJ0eXBlIjoiSldUIiwiYWxnIjoiSFM1MTIifQ.eyJqdGkiOiIyNjQwMDgzNiIsInJvbCI6IlJPTEVfUkVHSVNURVIiLCJpc3MiOiJPcGVuWExhYiIsImlhdCI6MTc2OTE0NDEwMSwiY2xpZW50SWQiOiJsa3pkeDU3bnZ5MjJqa3BxOXgydyIsInBob25lIjoiMTk5ODA4ODg3ODAiLCJvcGVuSWQiOm51bGwsInV1aWQiOiJmNmVkMDk0YS0wYzM3LTQzN2EtYmIyMi04MTk1ODVmZWQ5ODgiLCJlbWFpbCI6IiIsImV4cCI6MTc3MDM1MzcwMX0.ZIJQGdyrhUjC2z3Eqgt_LDuT7-q1ByWqPJ_jJYRwSyvd9_ZhbsvnUahVYh_zZiWVjiVSgZsx9IdXGReIbRpGqg
+#MINERU_TOKEN=eyJ0eXBlIjoiSldUIiwiYWxnIjoiSFM1MTIifQ.eyJqdGkiOiIyNjQwMDgzNiIsInJvbCI6IlJPTEVfUkVHSVNURVIiLCJpc3MiOiJPcGVuWExhYiIsImlhdCI6MTc2OTE0NDEwMSwiY2xpZW50SWQiOiJsa3pkeDU3bnZ5MjJqa3BxOXgydyIsInBob25lIjoiMTk5ODA4ODg3ODAiLCJvcGVuSWQiOm51bGwsInV1aWQiOiJmNmVkMDk0YS0wYzM3LTQzN2EtYmIyMi04MTk1ODVmZWQ5ODgiLCJlbWFpbCI6IiIsImV4cCI6MTc3MDM1MzcwMX0.ZIJQGdyrhUjC2z3Eqgt_LDuT7-q1ByWqPJ_jJYRwSyvd9_ZhbsvnUahVYh_zZiWVjiVSgZsx9IdXGReIbRpGqg
+MINERU_TOKEN=eyJ0eXBlIjoiSldUIiwiYWxnIjoiSFM1MTIifQ.eyJqdGkiOiI0MjcwMDM4NiIsInJvbCI6IlJPTEVfUkVHSVNURVIiLCJpc3MiOiJPcGVuWExhYiIsImlhdCI6MTc3MDM2MjIyNSwiY2xpZW50SWQiOiJsa3pkeDU3bnZ5MjJqa3BxOXgydyIsInBob25lIjoiIiwib3BlbklkIjpudWxsLCJ1dWlkIjoiYzgzOWVlYTAtYWZkOC00YTdjLWJmMTUtNTQ1YTU3ODQ2M2ZkIiwiZW1haWwiOiIiLCJleHAiOjE3NzE1NzE4MjV9.0DmDNrg7eSq8PxY043dyW08eKcIJOSOsVIDUx9oAmuMV1bQ6fMKBAXE1blL6mWyDn6B6jdbt3OESnVdNm3TqDQ
 MINERU_API_APPLY=https://mineru.net/api/v4/file-urls/batch
 MINERU_API_BATCH_RESULT=https://mineru.net/api/v4/extract-results/batch/{}
 
@@ -112,4 +113,8 @@ EMBEDDING_API_KEY=dummy
 
 
 
-
+# 外部标注平台配置
+# NOTE(review): long-lived admin JWT is committed in plain text below — rotate it
+# and load it from an environment variable or secret store instead.
+[external_api]
+admin_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJ1c2VyXzIwMjYwMTI5MTUxMTM4XzkzYjIyMjZkIiwidXNlcm5hbWUiOiJhZG1pbiIsImVtYWlsIjoiYWRtaW5AZXhhbXBsZS5jb20iLCJyb2xlIjoiYWRtaW4iLCJleHAiOjEwNDEyMzM3MDQ3LCJpYXQiOjE3NzI0MjM0NDcsInR5cGUiOiJhY2Nlc3MifQ.k5e_gyb9OvBJnztwYLcaZA80dgVKI_6LmfcfCNFTEi8
+project_api_url=http://192.168.92.61:9003/api/external/projects
+download_base_url=http://192.168.92.61:9003

+ 1 - 1
dev/app/LQAdminServer/docker-compose.yml

@@ -1,7 +1,7 @@
 services:
 
   LQAdminServer:
-    image: lq_admin_platform_server:v0.15
+    image: lq_admin_platform_server:v0.37
     container_name: LQAdminServer
     restart: always
     

+ 333 - 0
dev/app/LQAgentServer/deploy_agent.sh

@@ -0,0 +1,333 @@
+#!/bin/bash
+
+# ============ 强制要求 Bash 执行 ============
+if [ -z "$BASH_VERSION" ]; then
+    echo "****************************************************************"
+    echo "* 错误:此脚本必须使用 bash 执行,不支持 sh/dash!"
+    echo "* 请使用以下任一方式运行:"
+    echo "*   1. 赋予执行权限后: ./deploy_agent.sh"
+    echo "*   2. 显式指定 bash:  bash deploy_agent.sh"
+    echo "****************************************************************"
+    exit 1
+fi
+# ==========================================
+
+# ================= 配置区域 =================
+# 源代码路径
+SOURCE_DIR="/home/lq/lq_workspace/LQAgentServer/source/LQAgentPlatform"
+# Docker Compose 运行路径
+DOCKER_APP_DIR="/home/lq/lq_workspace/LQAgentServer/app/docker"
+# 配置文件名称
+COMPOSE_FILE="docker-compose.yml"
+# 镜像名称 (Repository)
+IMAGE_NAME="lq_agent_platform_server_dev"
+# Git 凭证
+# NOTE(review): credentials are hardcoded and committed to the repo — move to
+# environment variables or a git credential helper, and rotate this password.
+GIT_USER="WangXuMing"
+GIT_PASS="123456"
+
+# ================= 辅助函数 =================
+# 打印带时间戳的日志
+log_info() {
+    echo -e "\033[32m[INFO] $(date '+%Y-%m-%d %H:%M:%S') - $1\033[0m"
+}
+
+log_error() {
+    echo -e "\033[31m[ERROR] $(date '+%Y-%m-%d %H:%M:%S') - $1\033[0m"
+}
+
+log_warn() {
+    echo -e "\033[33m[WARN] $(date '+%Y-%m-%d %H:%M:%S') - $1\033[0m"
+}
+
+# 检查命令执行状态,如果失败则退出
+check_status() {
+    if [ $? -ne 0 ]; then
+        log_error "$1 执行失败,脚本终止。"
+        exit 1
+    fi
+}
+
+# 版本号比较函数
+# 返回: 0=相等, 1=第一个大于第二个, 2=第二个大于第一个
+compare_versions() {
+    v1=$(echo "$1" | sed 's/v//')
+    v2=$(echo "$2" | sed 's/v//')
+    
+    if [ "$(echo "$v1 == $v2" | bc)" -eq 1 ]; then
+        return 0
+    elif [ "$(echo "$v1 > $v2" | bc)" -eq 1 ]; then
+        return 1
+    else
+        return 2
+    fi
+}
+
+# ================= 步骤 1: Git 拉取代码 (带重试+强制拉取) =================
+log_info "步骤 1: 进入源码目录并拉取最新代码..."
+
+if [ ! -d "$SOURCE_DIR" ]; then
+    log_error "源码目录不存在: $SOURCE_DIR"
+    exit 1
+fi
+
+# 检查目录进入权限并修复
+if [ ! -x "$SOURCE_DIR" ]; then
+    log_error "源码目录无进入权限!正在修复..."
+    sudo chmod +x "$SOURCE_DIR"
+    sudo chown -R lq:lq "$SOURCE_DIR"
+fi
+
+cd "$SOURCE_DIR" || {
+    log_error "进入源码目录失败!路径:$SOURCE_DIR"
+    log_error "可能原因:1. 目录权限不足 2. 路径含特殊字符 3. 目录被删除"
+    exit 1
+}
+check_status "进入源码目录"  # 双重保障
+
+# 检查是否为 Git 仓库
+if [ ! -d ".git" ]; then
+    log_error "当前目录不是 Git 仓库!路径:$SOURCE_DIR"
+    exit 1
+fi
+
+log_info "检查本地是否存在可能与远程冲突的已修改文件..."
+
+HAS_CONFLICT_FILES=$(git status --porcelain | grep -v "^??")
+
+if [ -n "$HAS_CONFLICT_FILES" ]; then
+    log_info "发现以下文件存在本地修改(将被远程最新代码覆盖):"
+    echo "$HAS_CONFLICT_FILES" | awk '{print "  - " $2}'
+    log_info "正在强制丢弃本地修改,确保同步远程最新代码..."
+    
+    # 强制丢弃修改
+    git checkout -- .  # 仅丢弃已跟踪文件的本地修改(冲突风险文件)
+
+    
+    log_info "本地冲突文件修改已丢弃,准备拉取远程最新代码..."
+else
+    log_info "本地无可能冲突的已修改文件,直接拉取远程最新代码..."
+fi
+
+
+# 组装 Git 认证 URL(保留原逻辑)
+ORIGIN_URL=$(git remote get-url origin 2>/dev/null)
+if [ $? -ne 0 ]; then
+    log_error "获取 Git 远程地址失败!请检查 remote 配置"
+    exit 1
+fi
+
+# 初始化认证 URL(默认使用 origin 远程)
+CLEAN_URL=${ORIGIN_URL#*://}
+AUTH_URL="http://${GIT_USER}:${GIT_PASS}@${CLEAN_URL}"
+# 定义备用远程(upstream)及认证 URL
+UPSTREAM_URL=$(git remote get-url upstream 2>/dev/null)
+if [ $? -ne 0 ]; then
+    log_warn "未配置 upstream 远程,503 时无法切换备用源"
+    UPSTREAM_AVAILABLE=0
+else
+    UPSTREAM_CLEAN_URL=${UPSTREAM_URL#*://}
+    UPSTREAM_AUTH_URL="http://${GIT_USER}:${GIT_PASS}@${UPSTREAM_CLEAN_URL}"
+    UPSTREAM_AVAILABLE=1
+fi
+
+MAX_RETRIES=3
+COUNT=0
+GIT_SUCCESS=0
+CURRENT_AUTH_URL="$AUTH_URL"  # 当前使用的认证 URL
+
+while [ $COUNT -lt $MAX_RETRIES ]; do
+    log_info "正在执行 Git Pull (第 $((COUNT+1)) 次尝试) - 强制拉取 dev 分支最新代码..."
+    log_info "当前使用远程地址:${CURRENT_AUTH_URL}"
+    
+    # 执行 git pull 并捕获错误输出
+    PULL_OUTPUT=$(git pull "$CURRENT_AUTH_URL" dev --force --allow-unrelated-histories 2>&1)
+    PULL_EXIT_CODE=$?
+
+    if [ $PULL_EXIT_CODE -eq 0 ]; then
+        # 拉取成功:输出结果并退出循环
+        GIT_SUCCESS=1
+        LATEST_COMMIT=$(git log -1 --format="%h - %s ")
+        log_info "Git Pull 成功!当前部署提交版本:$LATEST_COMMIT"
+        break
+    else
+        # 拉取失败:判断错误类型(新增 returned error: 503 匹配规则)
+        if echo "$PULL_OUTPUT" | grep -qiE "503 Service Unavailable|503 Unavailable|returned error: 503" && [ $UPSTREAM_AVAILABLE -eq 1 ]; then
+            # 错误类型:503 服务不可用 + 有备用 upstream 远程
+            log_error "Git Pull 失败:当前远程(origin)返回 503 不可达,切换到备用远程(upstream)重试..."
+            log_error "错误详情:$PULL_OUTPUT"
+            CURRENT_AUTH_URL="$UPSTREAM_AUTH_URL"  # 切换为 upstream 认证 URL
+            COUNT=$((COUNT+1))
+            sleep 3
+        elif echo "$PULL_OUTPUT" | grep -qiE "503 Service Unavailable|503 Unavailable|returned error: 503" && [ $UPSTREAM_AVAILABLE -eq 0 ]; then
+            # 错误类型:503 但无备用源
+            log_error "Git Pull 失败:远程返回 503 不可达,但未配置 upstream 备用源,无法切换..."
+            log_error "错误详情:$PULL_OUTPUT"
+            COUNT=$((COUNT+1))
+            sleep 3
+        else
+            # 其他错误(如认证失败、网络不通、分支不存在等):按原逻辑重试
+            log_error "Git Pull 失败(非 503 错误),准备重试..."
+            log_error "错误详情:$PULL_OUTPUT"
+            COUNT=$((COUNT+1))
+            sleep 3
+        fi
+    fi
+done
+
+# 所有重试失败后的处理
+if [ $GIT_SUCCESS -eq 0 ]; then
+    log_error "Git Pull 已重试 $MAX_RETRIES 次,全部失败!"
+    exit 1
+fi
+
+# ================= 步骤 2: 关闭当前容器 =================
+log_info "步骤 2: 关闭正在运行的容器..."
+
+if [ ! -d "$DOCKER_APP_DIR" ]; then
+    log_error "Docker 运行目录不存在: $DOCKER_APP_DIR"
+    exit 1
+fi
+
+cd "$DOCKER_APP_DIR"
+check_status "进入 Docker 运行目录"
+
+docker compose down
+# 即使 down 失败(例如没启动),也继续执行,只记录错误
+if [ $? -ne 0 ]; then
+    log_error "警告: Docker Compose Down 返回非零状态,尝试继续..."
+fi
+
+# ================= 步骤 3: 获取当前运行版本并计算新版本号 =================
+log_info "步骤 3: 查找当前运行版本并计算新版本号..."
+
+# 获取当前 docker-compose 中指定的镜像版本
+CURRENT_CONFIG_TAG=$(grep "image: ${IMAGE_NAME}:" "$DOCKER_APP_DIR/$COMPOSE_FILE" | sed "s|.*image: ${IMAGE_NAME}:||")
+if [ -z "$CURRENT_CONFIG_TAG" ]; then
+    CURRENT_CONFIG_TAG="v0.01"
+    log_warn "未在配置文件中找到版本号,使用默认版本: $CURRENT_CONFIG_TAG"
+else
+    log_info "当前配置文件中的版本: $CURRENT_CONFIG_TAG"
+fi
+
+# 计算新版本号
+# 提取版本号数字 (去掉 'v'),例如 v0.13 -> 0.13
+VERSION_NUM=$(echo "$CURRENT_CONFIG_TAG" | sed 's/v//')
+# 计算新版本号 (这里设置为 +0.01,即 0.13 -> 0.14)
+NEW_VERSION_NUM=$(echo "$VERSION_NUM" | awk '{printf "%.2f", $1 + 0.01}')
+NEW_TAG="v$NEW_VERSION_NUM"
+
+log_info "计算出的新版本号为: $NEW_TAG"
+
+# ================= 步骤 4: 删除上上次的镜像版本 =================
+log_info "步骤 4: 清理旧镜像(保留最新版本,删除上上次版本)..."
+
+# 获取所有历史镜像,按创建时间降序排列
+# 使用 docker images --format 获取完整信息
+HISTORY_IMAGES=$(docker images --filter "reference=${IMAGE_NAME}:*" --format "{{.Tag}} {{.ID}} {{.CreatedAt}}" | sort -r)
+
+if [ -n "$HISTORY_IMAGES" ]; then
+    # 转换为数组
+    mapfile -t IMAGE_ARRAY <<< "$HISTORY_IMAGES"
+    
+    log_info "发现 ${#IMAGE_ARRAY[@]} 个历史镜像版本:"
+    for ((i=0; i<${#IMAGE_ARRAY[@]}; i++)); do
+        TAG=$(echo "${IMAGE_ARRAY[$i]}" | awk '{print $1}')
+        IMAGE_ID=$(echo "${IMAGE_ARRAY[$i]}" | awk '{print $2}')
+        CREATED=$(echo "${IMAGE_ARRAY[$i]}" | awk '{$1=$2=""; print $0}' | sed 's/^  //')
+        
+        # 标记当前运行版本
+        if [ "$TAG" = "$CURRENT_CONFIG_TAG" ]; then
+            log_info "  [$i] $TAG - $IMAGE_ID (当前运行版本) - $CREATED"
+            CURRENT_INDEX=$i
+        else
+            log_info "  [$i] $TAG - $IMAGE_ID - $CREATED"
+        fi
+    done
+    
+    # 保留策略:保留最新的(索引0)和当前的(如果有),删除上上次的(索引1,如果存在)
+    if [ ${#IMAGE_ARRAY[@]} -gt 1 ]; then
+        # 获取第二个镜像的信息(索引1)
+        SECOND_TAG=$(echo "${IMAGE_ARRAY[1]}" | awk '{print $1}')
+        SECOND_ID=$(echo "${IMAGE_ARRAY[1]}" | awk '{print $2}')
+        
+        # 检查是否是要删除的上上次版本
+        if [ "$SECOND_TAG" != "$NEW_TAG" ] && [ "$SECOND_TAG" != "$CURRENT_CONFIG_TAG" ]; then
+            log_info "正在删除上上次版本镜像: $SECOND_TAG ($SECOND_ID)"
+            
+            # 删除镜像
+            docker rmi -f "$SECOND_ID" 2>/dev/null
+            if [ $? -eq 0 ]; then
+                log_info "成功删除上上次版本镜像: $SECOND_TAG"
+            else
+                log_warn "删除镜像 $SECOND_TAG 失败(可能已被删除或正在使用),跳过..."
+            fi
+        else
+            log_info "跳过删除 $SECOND_TAG,因为它是当前运行版本或将要构建的新版本"
+        fi
+        
+        # 如果有第三个及以后的镜像,也删除(只保留最新的2个版本)
+        for ((i=2; i<${#IMAGE_ARRAY[@]}; i++)); do
+            OLD_TAG=$(echo "${IMAGE_ARRAY[$i]}" | awk '{print $1}')
+            OLD_ID=$(echo "${IMAGE_ARRAY[$i]}" | awk '{print $2}')
+            
+            if [ "$OLD_TAG" != "$NEW_TAG" ] && [ "$OLD_TAG" != "$CURRENT_CONFIG_TAG" ]; then
+                log_info "删除更旧的镜像: $OLD_TAG ($OLD_ID)"
+                docker rmi -f "$OLD_ID" 2>/dev/null
+            fi
+        done
+    else
+        log_info "只有1个历史镜像,无需清理"
+    fi
+else
+    log_info "未找到历史镜像"
+fi
+
+# ================= 步骤 5: 构建新镜像 =================
+log_info "步骤 5: 构建新镜像 $IMAGE_NAME:$NEW_TAG ..."
+
+cd "$SOURCE_DIR"
+check_status "返回源码目录"
+
+docker build -t "${IMAGE_NAME}:${NEW_TAG}" .
+check_status "镜像构建"
+log_info "镜像构建成功: ${IMAGE_NAME}:${NEW_TAG}"
+
+# ================= 步骤 6: 修改 docker-compose.yml 版本号 =================
+log_info "步骤 6: 更新 docker-compose.yml 中的版本号..."
+
+cd "$DOCKER_APP_DIR"
+check_status "进入 Docker 运行目录"
+
+if [ ! -f "$COMPOSE_FILE" ]; then
+    log_error "找不到配置文件: $COMPOSE_FILE"
+    exit 1
+fi
+
+# 使用 sed 正则替换
+# 匹配: image: lq_agent_platform_server_dev:任意字符
+# 替换为: image: lq_agent_platform_server_dev:新版本号
+sed -i "s|image: ${IMAGE_NAME}:.*|image: ${IMAGE_NAME}:${NEW_TAG}|" "$COMPOSE_FILE"
+check_status "修改 docker-compose.yml"
+
+# 验证修改结果
+MATCH_LINE=$(grep "image: ${IMAGE_NAME}:" "$COMPOSE_FILE")
+log_info "配置文件已更新: $MATCH_LINE"
+
+# ================= 步骤 7: 启动容器 =================
+log_info "步骤 7: 启动 Docker Compose..."
+
+docker compose up -d
+check_status "启动容器"
+
+# ================= 步骤 8: 显示当前保留的镜像 =================
+log_info "步骤 8: 当前保留的镜像版本列表:"
+docker images --filter "reference=${IMAGE_NAME}:*" --format "table {{.Tag}}\t{{.ID}}\t{{.Size}}\t{{.CreatedAt}}"
+
+log_info "===================================================="
+log_info " 开发版部署成功!"
+log_info " 当前运行端口: 8002"
+log_info " 部署版本: $NEW_TAG"
+log_info " 保留镜像: 最新版本 + 前一个版本"
+log_info "===================================================="

+ 242 - 0
dev/app/LQAgentServer/deploy_bak.sh

@@ -0,0 +1,242 @@
+#!/bin/bash
+
+# ================= 配置区域 =================
+# 源代码路径
+SOURCE_DIR="/home/lq/lq_workspace/LQAgentServer/source/LQAgentPlatform"
+# Docker Compose 运行路径
+DOCKER_APP_DIR="/home/lq/lq_workspace/LQAgentServer/app/docker"
+# 配置文件名称
+COMPOSE_FILE="docker-compose.yml"
+# 镜像名称 (Repository)
+IMAGE_NAME="lq_agent_platform_server_dev"
+# Git 凭证
+GIT_USER="WangXuMing"
+GIT_PASS="123456"
+
+# ================= 辅助函数 =================
+# 打印带时间戳的日志
+log_info() {
+    echo -e "\033[32m[INFO] $(date '+%Y-%m-%d %H:%M:%S') - $1\033[0m"
+}
+
+log_error() {
+    echo -e "\033[31m[ERROR] $(date '+%Y-%m-%d %H:%M:%S') - $1\033[0m"
+}
+
+# 检查命令执行状态,如果失败则退出
+check_status() {
+    if [ $? -ne 0 ]; then
+        log_error "$1 执行失败,脚本终止。"
+        exit 1
+    fi
+}
+
+# ================= 步骤 1: Git 拉取代码 (带重试+强制拉取) =================
+log_info "步骤 1: 进入源码目录并拉取最新代码..."
+
+if [ ! -d "$SOURCE_DIR" ]; then
+    log_error "源码目录不存在: $SOURCE_DIR"
+    exit 1
+fi
+
+# 检查目录进入权限并修复
+if [ ! -x "$SOURCE_DIR" ]; then
+    log_error "源码目录无进入权限!正在修复..."
+    sudo chmod +x "$SOURCE_DIR"
+    sudo chown -R lq:lq "$SOURCE_DIR"
+fi
+
+cd "$SOURCE_DIR" || {
+    log_error "进入源码目录失败!路径:$SOURCE_DIR"
+    log_error "可能原因:1. 目录权限不足 2. 路径含特殊字符 3. 目录被删除"
+    exit 1
+}
+check_status "进入源码目录"  # 双重保障
+
+# 检查是否为 Git 仓库
+if [ ! -d ".git" ]; then
+    log_error "当前目录不是 Git 仓库!路径:$SOURCE_DIR"
+    exit 1
+fi
+
+log_info "检查本地是否存在可能与远程冲突的已修改文件..."
+
+HAS_CONFLICT_FILES=$(git status --porcelain | grep -v "^??")
+
+if [ -n "$HAS_CONFLICT_FILES" ]; then
+    log_info "发现以下文件存在本地修改(将被远程最新代码覆盖):"
+    echo "$HAS_CONFLICT_FILES" | awk '{print "  - " $2}'
+    log_info "正在强制丢弃本地修改,确保同步远程最新代码..."
+    
+    # 强制丢弃修改
+    git checkout -- .  # 仅丢弃已跟踪文件的本地修改(冲突风险文件)
+
+    
+    log_info "本地冲突文件修改已丢弃,准备拉取远程最新代码..."
+else
+    log_info "本地无可能冲突的已修改文件,直接拉取远程最新代码..."
+fi
+
+
+# 组装 Git 认证 URL(保留原逻辑)
+ORIGIN_URL=$(git remote get-url origin 2>/dev/null)
+if [ $? -ne 0 ]; then
+    log_error "获取 Git 远程地址失败!请检查 remote 配置"
+    exit 1
+fi
+
+# 初始化认证 URL(默认使用 origin 远程)
+CLEAN_URL=${ORIGIN_URL#*://}
+AUTH_URL="http://${GIT_USER}:${GIT_PASS}@${CLEAN_URL}"
+# 定义备用远程(upstream)及认证 URL
+UPSTREAM_URL=$(git remote get-url upstream 2>/dev/null)
+if [ $? -ne 0 ]; then
+    log_error "未配置 upstream 远程,503 时无法切换备用源"
+    UPSTREAM_AVAILABLE=0
+else
+    UPSTREAM_CLEAN_URL=${UPSTREAM_URL#*://}
+    UPSTREAM_AUTH_URL="http://${GIT_USER}:${GIT_PASS}@${UPSTREAM_CLEAN_URL}"
+    UPSTREAM_AVAILABLE=1
+fi
+
+MAX_RETRIES=3
+COUNT=0
+GIT_SUCCESS=0
+CURRENT_AUTH_URL="$AUTH_URL"  # 当前使用的认证 URL
+
+while [ $COUNT -lt $MAX_RETRIES ]; do
+    log_info "正在执行 Git Pull (第 $((COUNT+1)) 次尝试) - 强制拉取 dev 分支最新代码..."
+    log_info "当前使用远程地址:${CURRENT_AUTH_URL}"
+    
+    # 执行 git pull 并捕获错误输出
+    PULL_OUTPUT=$(git pull "$CURRENT_AUTH_URL" dev --force --allow-unrelated-histories 2>&1)
+    PULL_EXIT_CODE=$?
+
+    if [ $PULL_EXIT_CODE -eq 0 ]; then
+        # 拉取成功:输出结果并退出循环
+        GIT_SUCCESS=1
+        LATEST_COMMIT=$(git log -1 --format="%h - %s ")
+        log_info "Git Pull 成功!当前部署提交版本:$LATEST_COMMIT"
+        break
+    else
+        # 拉取失败:判断错误类型(新增 returned error: 503 匹配规则)
+        if echo "$PULL_OUTPUT" | grep -qiE "503 Service Unavailable|503 Unavailable|returned error: 503" && [ $UPSTREAM_AVAILABLE -eq 1 ]; then
+            # 错误类型:503 服务不可用 + 有备用 upstream 远程
+            log_error "Git Pull 失败:当前远程(origin)返回 503 不可达,切换到备用远程(upstream)重试..."
+            log_error "错误详情:$PULL_OUTPUT"
+            CURRENT_AUTH_URL="$UPSTREAM_AUTH_URL"  # 切换为 upstream 认证 URL
+            COUNT=$((COUNT+1))
+            sleep 3
+        elif echo "$PULL_OUTPUT" | grep -qiE "503 Service Unavailable|503 Unavailable|returned error: 503" && [ $UPSTREAM_AVAILABLE -eq 0 ]; then
+            # 错误类型:503 但无备用源
+            log_error "Git Pull 失败:远程返回 503 不可达,但未配置 upstream 备用源,无法切换..."
+            log_error "错误详情:$PULL_OUTPUT"
+            COUNT=$((COUNT+1))
+            sleep 3
+        else
+            # 其他错误(如认证失败、网络不通、分支不存在等):按原逻辑重试
+            log_error "Git Pull 失败(非 503 错误),准备重试..."
+            log_error "错误详情:$PULL_OUTPUT"
+            COUNT=$((COUNT+1))
+            sleep 3
+        fi
+    fi
+done
+
+# 所有重试失败后的处理
+if [ $GIT_SUCCESS -eq 0 ]; then
+    log_error "Git Pull 已重试 $MAX_RETRIES 次,全部失败!"
+    exit 1
+fi
+
+# ================= 步骤 2: 关闭当前容器 =================
+log_info "步骤 2: 关闭正在运行的容器..."
+
+if [ ! -d "$DOCKER_APP_DIR" ]; then
+    log_error "Docker 运行目录不存在: $DOCKER_APP_DIR"
+    exit 1
+fi
+
+cd "$DOCKER_APP_DIR"
+check_status "进入 Docker 运行目录"
+
+docker compose down
+# 即使 down 失败(例如没启动),也继续执行,只记录错误
+if [ $? -ne 0 ]; then
+    log_error "警告: Docker Compose Down 返回非零状态,尝试继续..."
+fi
+
+# ================= 步骤 3: 提取版本号并删除旧镜像 =================
+log_info "步骤 3: 查找旧镜像并计算新版本号..."
+
+# 获取镜像信息,例如: lq_agent_platform_server   v0.13   76d87fcfb5e5
+IMAGE_INFO=$(docker images | grep "^${IMAGE_NAME} " | awk '{print $2, $3}' | head -n 1)
+
+NEW_TAG="v0.01" # 默认初始版本
+
+if [ -n "$IMAGE_INFO" ]; then
+    OLD_TAG=$(echo "$IMAGE_INFO" | awk '{print $1}')
+    IMAGE_ID=$(echo "$IMAGE_INFO" | awk '{print $2}')
+    
+    log_info "找到旧镜像: Tag=$OLD_TAG, ID=$IMAGE_ID"
+    
+    # 提取版本号数字 (去掉 'v'),例如 v0.13 -> 0.13
+    VERSION_NUM=$(echo "$OLD_TAG" | sed 's/v//')
+    
+    # 计算新版本号 (这里设置为 +0.01,即 0.13 -> 0.14)
+    NEW_VERSION_NUM=$(echo "$VERSION_NUM" | awk '{printf "%.2f", $1 + 0.01}')
+    NEW_TAG="v$NEW_VERSION_NUM"
+    
+    log_info "计算出的新版本号为: $NEW_TAG"
+    
+    log_info "正在删除旧镜像 ID: $IMAGE_ID ..."
+    docker rmi -f "$IMAGE_ID"
+    if [ $? -ne 0 ]; then
+        log_error "警告: 删除旧镜像失败,可能被占用,将继续构建。"
+    else
+        log_info "旧镜像删除成功。"
+    fi
+else
+    log_info "未找到旧镜像,将使用默认版本 $NEW_TAG 构建。"
+fi
+
+# ================= 步骤 4: 构建新镜像 =================
+log_info "步骤 4: 构建新镜像 $IMAGE_NAME:$NEW_TAG ..."
+
+cd "$SOURCE_DIR"
+check_status "返回源码目录"
+
+docker build -t "${IMAGE_NAME}:${NEW_TAG}" .
+check_status "镜像构建"
+log_info "镜像构建成功: ${IMAGE_NAME}:${NEW_TAG}"
+
+# ================= 步骤 5: 修改 docker-compose.yml 版本号 =================
+log_info "步骤 5: 更新 docker-compose.yml 中的版本号..."
+
+cd "$DOCKER_APP_DIR"
+check_status "进入 Docker 运行目录"
+
+if [ ! -f "$COMPOSE_FILE" ]; then
+    log_error "找不到配置文件: $COMPOSE_FILE"
+    exit 1
+fi
+
+# 使用 sed 正则替换
+# 匹配: image: lq_agent_platform_server:任意字符
+# 替换为: image: lq_agent_platform_server:新版本号
+sed -i "s|image: ${IMAGE_NAME}:.*|image: ${IMAGE_NAME}:${NEW_TAG}|" "$COMPOSE_FILE"
+check_status "修改 docker-compose.yml"
+
+# 验证修改结果
+MATCH_LINE=$(grep "image: ${IMAGE_NAME}:" "$COMPOSE_FILE")
+log_info "配置文件已更新: $MATCH_LINE"
+
+# ================= 步骤 6: 启动容器 =================
+log_info "步骤 6: 启动 Docker Compose..."
+
+docker compose up -d
+check_status "启动容器"
+
+log_info "===================================================="
+log_info " 开发版部署成功!当前运行端口8002,部署版本: $NEW_TAG"
+log_info "===================================================="

+ 1 - 1
dev/app/LQAgentServer/docker-compose.yml

@@ -1,6 +1,6 @@
 services:
   LQAgentServer:
-    image: lq_agent_platform_server_dev:v0.16
+    image: lq_agent_platform_server_dev:v0.21
     container_name: LQAgentServer_dev
     restart: always
     

+ 113 - 0
dev/mnerU/compose.yaml

@@ -0,0 +1,113 @@
+services:
+  mineru-openai-server:
+    image: mineru:latest
+    container_name: mineru-openai-server
+    restart: always
+    profiles: ["openai-server"]
+    ports:
+      - 30000:30000
+    environment:
+      MINERU_MODEL_SOURCE: local
+    entrypoint: mineru-openai-server
+    command:
+      --host 0.0.0.0
+      --port 30000
+      # --data-parallel-size 2  # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
+      # --gpu-memory-utilization 0.5  # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
+    ulimits:
+      memlock: -1
+      stack: 67108864
+    ipc: host
+    healthcheck:
+      test: ["CMD-SHELL", "curl -f http://localhost:30000/health || exit 1"]
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              device_ids: ["0"]  # Modify for multiple GPUs: ["0", "1"]
+              capabilities: [gpu]
+
+  mineru-api:
+    image: mineru:latest
+    container_name: mineru-api
+    restart: always
+    profiles: ["api"]
+    ports:
+      - 23424:8000
+    environment:
+      #MINERU_MODEL_SOURCE: local
+      # 模型源:与 --source modelscope 保持一致
+      - MINERU_MODEL_SOURCE=modelscope
+      # 模型缓存路径(容器内)
+      - MODELSCOPE_CACHE=/root/.cache/modelscope
+      - MINERU_CACHE_DIR=/root/.cache/mineru
+      # Transformers/HF 缓存,避免路径冲突
+      - TRANSFORMERS_CACHE=/root/.cache/huggingface/transformers
+      - HF_HOME=/root/.cache/huggingface
+      # 日志与语言
+      - LOG_DIR=/app/logs
+      - LANG=zh_CN.UTF-8
+      - PYTHONUNBUFFERED=1
+    entrypoint: mineru-api
+    command:
+      --host 0.0.0.0
+      --port 8000
+      # parameters for vllm-engine
+      # --data-parallel-size 2  # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
+      # --gpu-memory-utilization 0.5  # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
+    volumes:
+      # 1. 模型缓存持久化 (核心:避免重复下载)
+      - /home/ubuntu/.cache/modelscope:/root/.cache/modelscope:rw
+      # 2. MinerU 缓存持久化
+      - /home/ubuntu/.cache/mineru:/root/.cache/mineru:rw
+      - /home/ubuntu/.cache/huggingface:/root/.cache/huggingface:rw  # 新增:避免 transformers 缓存冲突
+      # 3. 日志目录映射
+      - /home/ubuntu/minerU/logs:/app/logs:rw
+      # 4. 输入文件目录 (可选,如果 API 支持文件上传处理)
+      - /home/ubuntu/minerU/input:/app/input:ro
+      # 5. 输出结果目录 (可选)
+      - /home/ubuntu/minerU/output:/app/output:rw
+      # 6. 配置文件目录 (可选,如有自定义配置)
+      - /home/ubuntu/minerU/config:/app/config:ro
+    ulimits:
+      memlock: -1
+      stack: 67108864
+    ipc: host
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              device_ids: ["0"]  # Modify for multiple GPUs: ["0", "1"]
+              capabilities: [gpu]
+
+  mineru-gradio:
+    image: mineru:latest
+    container_name: mineru-gradio
+    restart: always
+    profiles: ["gradio"]
+    ports:
+      - 23425:7860
+    environment:
+      MINERU_MODEL_SOURCE: local
+    entrypoint: mineru-gradio
+    command:
+      --server-name 0.0.0.0
+      --server-port 7860
+      # --enable-api false  # If you want to disable the API, set this to false
+      # --max-convert-pages 20  # If you want to limit the number of pages for conversion, set this to a specific number
+      # parameters for vllm-engine
+      # --data-parallel-size 2  # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
+      # --gpu-memory-utilization 0.5  # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
+    ulimits:
+      memlock: -1
+      stack: 67108864
+    ipc: host
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              device_ids: ["0"]  # Modify for multiple GPUs: ["0", "1"]
+              capabilities: [gpu]

+ 87 - 0
dev/mnerU/compose.yaml.bak

@@ -0,0 +1,87 @@
+services:
+  mineru-openai-server:
+    image: mineru:latest
+    container_name: mineru-openai-server
+    restart: always
+    profiles: ["openai-server"]
+    ports:
+      - 30000:30000
+    environment:
+      MINERU_MODEL_SOURCE: local
+    entrypoint: mineru-openai-server
+    command:
+      --host 0.0.0.0
+      --port 30000
+      # --data-parallel-size 2  # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
+      # --gpu-memory-utilization 0.5  # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
+    ulimits:
+      memlock: -1
+      stack: 67108864
+    ipc: host
+    healthcheck:
+      test: ["CMD-SHELL", "curl -f http://localhost:30000/health || exit 1"]
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              device_ids: ["0"]  # Modify for multiple GPUs: ["0", "1"]
+              capabilities: [gpu]
+
+  mineru-api:
+    image: mineru:latest
+    container_name: mineru-api
+    restart: always
+    profiles: ["api"]
+    ports:
+      - 23424:8000
+    environment:
+      MINERU_MODEL_SOURCE: local
+    entrypoint: mineru-api
+    command:
+      --host 0.0.0.0
+      --port 8000
+      # parameters for vllm-engine
+      # --data-parallel-size 2  # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
+      # --gpu-memory-utilization 0.5  # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
+    ulimits:
+      memlock: -1
+      stack: 67108864
+    ipc: host
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              device_ids: ["0"]  # Modify for multiple GPUs: ["0", "1"]
+              capabilities: [gpu]
+
+  mineru-gradio:
+    image: mineru:latest
+    container_name: mineru-gradio
+    restart: always
+    profiles: ["gradio"]
+    ports:
+      - 23425:7860
+    environment:
+      MINERU_MODEL_SOURCE: local
+    entrypoint: mineru-gradio
+    command:
+      --server-name 0.0.0.0
+      --server-port 7860
+      # --enable-api false  # If you want to disable the API, set this to false
+      # --max-convert-pages 20  # If you want to limit the number of pages for conversion, set this to a specific number
+      # parameters for vllm-engine
+      # --data-parallel-size 2  # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
+      # --gpu-memory-utilization 0.5  # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
+    ulimits:
+      memlock: -1
+      stack: 67108864
+    ipc: host
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              device_ids: ["0"]  # Modify for multiple GPUs: ["0", "1"]
+              capabilities: [gpu]

+ 55 - 0
dev/mnerU/docker-compose-MinerU-api.yaml

@@ -0,0 +1,55 @@
+
+services:
+  mineru-api:
+      image: mineru:latest
+      container_name: mineru-api
+      restart: always
+      profiles: ["api"]
+      ports:
+        - 23424:8000
+      environment:
+        #MINERU_MODEL_SOURCE: local
+        # 模型源:与 --source modelscope 保持一致
+        - MINERU_MODEL_SOURCE=modelscope
+        # 模型缓存路径(容器内)
+        - MODELSCOPE_CACHE=/root/.cache/modelscope
+        - MINERU_CACHE_DIR=/root/.cache/mineru
+        # Transformers/HF 缓存,避免路径冲突
+        - TRANSFORMERS_CACHE=/root/.cache/huggingface/transformers
+        - HF_HOME=/root/.cache/huggingface
+        # 日志与语言
+        - LOG_DIR=/app/logs
+        - LANG=zh_CN.UTF-8
+        - PYTHONUNBUFFERED=1
+      entrypoint: mineru-api
+      command:
+        --host 0.0.0.0
+        --port 8000
+        # parameters for vllm-engine
+        # --data-parallel-size 2  # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
+        # --gpu-memory-utilization 0.5  # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
+      volumes:
+        # 1. 模型缓存持久化 (核心:避免重复下载)
+        - /home/ubuntu/.cache/modelscope:/root/.cache/modelscope:rw
+        # 2. MinerU 缓存持久化
+        - /home/ubuntu/.cache/mineru:/root/.cache/mineru:rw
+        - /home/ubuntu/.cache/huggingface:/root/.cache/huggingface:rw  # 新增:避免 transformers 缓存冲突
+        # 3. 日志目录映射
+        - /home/ubuntu/minerU/logs:/app/logs:rw
+        # 4. 输入文件目录 (可选,如果 API 支持文件上传处理)
+        - /home/ubuntu/minerU/input:/app/input:ro
+        # 5. 输出结果目录 (可选)
+        - /home/ubuntu/minerU/output:/app/output:rw
+        # 6. 配置文件目录 (可选,如有自定义配置)
+        - /home/ubuntu/minerU/config:/app/config:ro
+      ulimits:
+        memlock: -1
+        stack: 67108864
+      ipc: host
+      deploy:
+        resources:
+          reservations:
+            devices:
+              - driver: nvidia
+                device_ids: ["0"]  # Modify for multiple GPUs: ["0", "1"]
+                capabilities: [gpu]

+ 68 - 0
dev/nginx-dev/app_conf/lq_admin_server.conf

@@ -0,0 +1,68 @@
+       
+server{
+
+	listen 80;
+        server_name _;
+        root /usr/share/nginx/html;
+        index index.html index.htm;
+
+       # 如果请求根目录,重定向到 index.html
+       location = / {
+          try_files $uri $uri/ /index.html;
+       }
+
+
+        # 静态资源缓存
+        location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
+            expires 1y;
+            add_header Cache-Control "public, immutable";
+            access_log off;
+        }
+
+        # HTML 文件不缓存
+        location ~* \.html$ {
+            expires -1;
+            add_header Cache-Control "no-cache, no-store, must-revalidate";
+            add_header Pragma "no-cache";
+        }
+
+        # API 代理(可选,如果需要代理到后端)
+        location /api/ {
+            proxy_pass http://LQAdminServer:8000/api/;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Proto $scheme;
+            proxy_connect_timeout 30s;
+            proxy_send_timeout 30s;
+            proxy_read_timeout 30s;
+        }
+
+        # OAuth 代理
+        location /oauth/ {
+            proxy_pass http://LQAdminServer:8000/oauth/;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Proto $scheme;
+        }
+
+        # SPA 路由支持
+        location / {
+            try_files $uri $uri/ /index.html;
+        }
+
+        # 健康检查
+        location /health {
+            access_log off;
+            return 200 "healthy\n";
+            add_header Content-Type text/plain;
+        }
+
+        # 安全配置
+        location ~ /\. {
+            deny all;
+            access_log off;
+            log_not_found off;
+        }
+}

+ 56 - 0
dev/nginx-dev/app_conf/lq_label.conf

@@ -0,0 +1,56 @@
+server {
+    listen 9003;
+    server_name _;
+    root /usr/share/nginx/html_app/lq_label;
+    index index.html index.htm;
+
+    # 如果请求根目录,重定向到 index.html
+    location = / {
+        try_files $uri $uri/ /index.html;
+    }
+
+    # 静态资源缓存
+    location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
+        expires 1y;
+        add_header Cache-Control "public, immutable";
+        access_log off;
+    }
+
+    # HTML 文件不缓存
+    location ~* \.html$ {
+        expires -1;
+        add_header Cache-Control "no-cache, no-store, must-revalidate";
+        add_header Pragma "no-cache";
+    }
+
+    # API 代理到后端
+    location /api/ {
+        proxy_pass http://lq-label-backend:8000/api/;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+        proxy_connect_timeout 30s;
+        proxy_send_timeout 30s;
+        proxy_read_timeout 30s;
+    }
+
+    # SPA 路由支持
+    location / {
+        try_files $uri $uri/ /index.html;
+    }
+
+    # 健康检查
+    location /health {
+        access_log off;
+        return 200 "healthy\n";
+        add_header Content-Type text/plain;
+    }
+
+    # 安全配置
+    location ~ /\. {
+        deny all;
+        access_log off;
+        log_not_found off;
+    }
+}

+ 67 - 0
dev/nginx-dev/app_conf/sub_system_server.conf

@@ -0,0 +1,67 @@
+
+    server {
+        listen 9100;
+        server_name _;
+        root /usr/share/nginx/html_app/sub_system;
+        index index.html index.htm;
+
+       # 如果请求根目录,重定向到 index.html
+       location = / {
+          try_files $uri $uri/ /index.html;
+       }
+
+
+        # 静态资源缓存
+        location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
+            expires 1y;
+            add_header Cache-Control "public, immutable";
+            access_log off;
+        }
+
+        # HTML 文件不缓存
+        location ~* \.html$ {
+            expires -1;
+            add_header Cache-Control "no-cache, no-store, must-revalidate";
+            add_header Pragma "no-cache";
+        }
+
+        # API 代理(可选,如果需要代理到后端)
+        location /api/ {
+            proxy_pass http://SubSystemServer:8100/api/;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Proto $scheme;
+            proxy_connect_timeout 30s;
+            proxy_send_timeout 30s;
+            proxy_read_timeout 30s;
+        }
+
+        # OAuth 代理
+        location /auth/ {
+            proxy_pass http://SubSystemServer:8100/auth/;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Proto $scheme;
+        }
+
+        # SPA 路由支持
+        location / {
+            try_files $uri $uri/ /index.html;
+        }
+
+        # 健康检查
+        location /health {
+            access_log off;
+            return 200 "healthy\n";
+            add_header Content-Type text/plain;
+        }
+
+        # 安全配置
+        location ~ /\. {
+            deny all;
+            access_log off;
+            log_not_found off;
+        }
+    }