# Docker Compose definitions for MinerU deployment targets.
# Each service is gated behind a Compose profile, so only the requested
# variant starts, e.g.:
#   docker compose --profile api up -d
# All services share the same image and require the NVIDIA container runtime.
services:
  mineru-openai-server:
    image: mineru:latest
    container_name: mineru-openai-server
    restart: always
    profiles: ["openai-server"]
    ports:
      # Quoted to avoid YAML scalar ambiguity in host:container mappings.
      - "30000:30000"
    environment:
      MINERU_MODEL_SOURCE: local
    entrypoint: mineru-openai-server
    command:
      --host 0.0.0.0
      --port 30000
      # --data-parallel-size 2 # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
      # --gpu-memory-utilization 0.5 # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
    ulimits:
      memlock: -1
      stack: 67108864
    ipc: host
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:30000/health || exit 1"]
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["0"]  # Modify for multiple GPUs: ["0", "1"]
              capabilities: [gpu]

  mineru-api:
    image: mineru:latest
    container_name: mineru-api
    restart: always
    profiles: ["api"]
    ports:
      # Host port 25429 maps to the API's in-container port 8000.
      - "25429:8000"
    environment:
      # MINERU_MODEL_SOURCE: local
      # Model source: keep consistent with `--source modelscope`
      - MINERU_MODEL_SOURCE=modelscope
      # Model cache path (inside the container)
      - MODELSCOPE_CACHE=/root/.cache/modelscope
      - MINERU_CACHE_DIR=/root/.cache/mineru
      # Transformers/HF cache locations, to avoid path conflicts
      - TRANSFORMERS_CACHE=/root/.cache/huggingface/transformers
      - HF_HOME=/root/.cache/huggingface
      # Logging and locale
      - LOG_DIR=/app/logs
      - LANG=zh_CN.UTF-8
      - PYTHONUNBUFFERED=1
    entrypoint: mineru-api
    command:
      --host 0.0.0.0
      --port 8000
      # parameters for vllm-engine
      # --data-parallel-size 2 # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
      # --gpu-memory-utilization 0.5 # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
    volumes:
      # 1. Persist the model cache (key point: avoids re-downloading models)
      - /home/ubuntu/.cache/modelscope:/root/.cache/modelscope:rw
      # 2. Persist the MinerU cache
      - /home/ubuntu/.cache/mineru:/root/.cache/mineru:rw
      - /home/ubuntu/.cache/huggingface:/root/.cache/huggingface:rw  # Added: avoids transformers cache conflicts
      # 3. Log directory mapping
      - /home/ubuntu/minerU/logs:/app/logs:rw
      # 4. Input file directory (optional, if the API handles uploaded files)
      - /home/ubuntu/minerU/input:/app/input:ro
      # 5. Output results directory (optional)
      - /home/ubuntu/minerU/output:/app/output:rw
      # 6. Config file directory (optional, for custom configuration)
      - /home/ubuntu/minerU/config:/app/config:ro
    ulimits:
      memlock: -1
      stack: 67108864
    ipc: host
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["6"]  # Modify for multiple GPUs: ["0", "1"]
              capabilities: [gpu]

  mineru-gradio:
    image: mineru:latest
    container_name: mineru-gradio
    restart: always
    profiles: ["gradio"]
    ports:
      # Host port 23425 maps to Gradio's default in-container port 7860.
      - "23425:7860"
    environment:
      MINERU_MODEL_SOURCE: local
    entrypoint: mineru-gradio
    command:
      --server-name 0.0.0.0
      --server-port 7860
      # --enable-api false # If you want to disable the API, set this to false
      # --max-convert-pages 20 # If you want to limit the number of pages for conversion, set this to a specific number
      # parameters for vllm-engine
      # --data-parallel-size 2 # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
      # --gpu-memory-utilization 0.5 # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
    ulimits:
      memlock: -1
      stack: 67108864
    ipc: host
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["0"]  # Modify for multiple GPUs: ["0", "1"]
              capabilities: [gpu]