# compose.yaml — MinerU deployment (select a service via profiles: openai-server | api | gradio)
---
  1. services:
  2. mineru-openai-server:
  3. image: mineru:latest
  4. container_name: mineru-openai-server
  5. restart: always
  6. profiles: ["openai-server"]
  7. ports:
  8. - 30000:30000
  9. environment:
  10. MINERU_MODEL_SOURCE: local
  11. entrypoint: mineru-openai-server
  12. command:
  13. --host 0.0.0.0
  14. --port 30000
  15. # --data-parallel-size 2 # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
  16. # --gpu-memory-utilization 0.5 # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
  17. ulimits:
  18. memlock: -1
  19. stack: 67108864
  20. ipc: host
  21. healthcheck:
  22. test: ["CMD-SHELL", "curl -f http://localhost:30000/health || exit 1"]
  23. deploy:
  24. resources:
  25. reservations:
  26. devices:
  27. - driver: nvidia
  28. device_ids: ["6"] # Modify for multiple GPUs: ["0", "1"]
  29. capabilities: [gpu]
  mineru-api:
    image: mineru:latest
    container_name: mineru-api
    restart: always
    profiles: ["api"]
    ports:
      # Quoted: colon-separated digit pairs are ambiguous in YAML 1.1 (sexagesimal).
      - "25428:8000"
    # Map style, consistent with the other services (compose accepts both forms).
    environment:
      # MINERU_MODEL_SOURCE: local
      # Model source: keep consistent with `--source modelscope`.
      MINERU_MODEL_SOURCE: modelscope
      # Model cache paths (inside the container).
      MODELSCOPE_CACHE: /root/.cache/modelscope
      MINERU_CACHE_DIR: /root/.cache/mineru
      # Transformers/HF caches, kept separate to avoid path conflicts.
      TRANSFORMERS_CACHE: /root/.cache/huggingface/transformers
      HF_HOME: /root/.cache/huggingface
      # Logging and locale.
      LOG_DIR: /app/logs
      LANG: zh_CN.UTF-8
      # Quoted so the value stays the string "1" rather than an integer.
      PYTHONUNBUFFERED: "1"
    entrypoint: mineru-api
    # Multi-line plain scalar: folds into "--host 0.0.0.0 --port 8000".
    command:
      --host 0.0.0.0
      --port 8000
      # parameters for vllm-engine
      # --data-parallel-size 2 # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
      # --gpu-memory-utilization 0.5 # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
    volumes:
      # 1. Persist the model cache (critical: avoids re-downloading models).
      - /data/app_workspace/.cache/modelscope:/root/.cache/modelscope:rw
      # 2. Persist the MinerU cache.
      - /data/app_workspace/.cache/mineru:/root/.cache/mineru:rw
      - /data/app_workspace/.cache/huggingface:/root/.cache/huggingface:rw # Added: avoids transformers cache conflicts
      # 3. Log directory mapping.
      - /data/app_workspace/minerU/logs:/app/logs:rw
      # 4. Input file directory (optional, if the API processes uploaded files).
      - /data/app_workspace/minerU/input:/app/input:ro
      # 5. Output results directory (optional).
      - /data/app_workspace/minerU/output:/app/output:rw
      # 6. Configuration directory (optional, for custom configuration).
      - /data/app_workspace/minerU/config:/app/config:ro
    ulimits:
      memlock: -1
      stack: 67108864
    ipc: host
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["6"] # Modify for multiple GPUs: ["0", "1"]
              capabilities: [gpu]
  mineru-gradio:
    image: mineru:latest
    container_name: mineru-gradio
    restart: always
    profiles: ["gradio"]
    ports:
      # Quoted: colon-separated digit pairs are ambiguous in YAML 1.1 (sexagesimal).
      - "7860:7860"
    environment:
      MINERU_MODEL_SOURCE: local
    entrypoint: mineru-gradio
    # Multi-line plain scalar: folds into "--server-name 0.0.0.0 --server-port 7860".
    command:
      --server-name 0.0.0.0
      --server-port 7860
      # --enable-api false # If you want to disable the API, set this to false
      # --max-convert-pages 20 # If you want to limit the number of pages for conversion, set this to a specific number
      # parameters for vllm-engine
      # --data-parallel-size 2 # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
      # --gpu-memory-utilization 0.5 # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
    ulimits:
      memlock: -1
      stack: 67108864
    ipc: host
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["0"] # Modify for multiple GPUs: ["0", "1"]
              capabilities: [gpu]