# Mirror of https://github.com/opendatalab/MinerU.git
# Synced 2026-04-05 23:48:36 +07:00
services:
  # OpenAI-compatible inference server (profile: openai-server).
  mineru-openai-server:
    image: mineru:latest
    container_name: mineru-openai-server
    restart: always
    profiles: ["openai-server"]
    ports:
      # Quoted: Compose port mappings should always be strings to avoid
      # YAML number-parsing surprises.
      - "30000:30000"
    environment:
      MINERU_MODEL_SOURCE: local
    entrypoint: mineru-openai-server
    # NOTE: command uses YAML list (exec) form. A multi-line plain scalar
    # cannot resume after an interleaved comment line, so mixing comments
    # and active flags in scalar form is invalid YAML. In list form each
    # element is one argv token, and comments between items are legal.
    command:
      # ==================== Engine Selection ====================
      # WARNING: Only ONE engine can be enabled at a time!
      # Choose 'vllm' OR 'lmdeploy' (uncomment one pair below)
      - --engine
      - vllm
      # - --engine
      # - lmdeploy

      # ==================== vLLM Engine Parameters ====================
      # Uncomment if using --engine vllm
      - --host
      - "0.0.0.0"
      - --port
      - "30000"
      # Multi-GPU configuration (increase throughput)
      # - --data-parallel-size
      # - "2"
      # Single GPU memory optimization (reduce if VRAM insufficient)
      # - --gpu-memory-utilization
      # - "0.5"  # Try 0.4 or lower if issues persist

      # ==================== LMDeploy Engine Parameters ====================
      # Uncomment if using --engine lmdeploy
      # - --server-name
      # - "0.0.0.0"
      # - --server-port
      # - "30000"
      # Multi-GPU configuration (increase throughput)
      # - --dp
      # - "2"
      # Single GPU memory optimization (reduce if VRAM insufficient)
      # - --cache-max-entry-count
      # - "0.5"  # Try 0.4 or lower if issues persist
    ulimits:
      memlock: -1
      stack: 67108864
    ipc: host
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:30000/health || exit 1"]
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["0"]  # Modify for multiple GPUs: ["0", "1"]
              capabilities: [gpu]
mineru-api:
|
|
image: mineru:latest
|
|
container_name: mineru-api
|
|
restart: always
|
|
profiles: ["api"]
|
|
ports:
|
|
- 8000:8000
|
|
environment:
|
|
MINERU_MODEL_SOURCE: local
|
|
entrypoint: mineru-api
|
|
command:
|
|
# ==================== Server Configuration ====================
|
|
--host 0.0.0.0
|
|
--port 8000
|
|
|
|
# ==================== vLLM Engine Parameters ====================
|
|
# Multi-GPU configuration
|
|
# --data-parallel-size 2
|
|
# Single GPU memory optimization
|
|
# --gpu-memory-utilization 0.5 # Try 0.4 or lower if VRAM insufficient
|
|
|
|
# ==================== LMDeploy Engine Parameters ====================
|
|
# Multi-GPU configuration
|
|
# --dp 2
|
|
# Single GPU memory optimization
|
|
# --cache-max-entry-count 0.5 # Try 0.4 or lower if VRAM insufficient
|
|
ulimits:
|
|
memlock: -1
|
|
stack: 67108864
|
|
ipc: host
|
|
deploy:
|
|
resources:
|
|
reservations:
|
|
devices:
|
|
- driver: nvidia
|
|
device_ids: ["0"] # Modify for multiple GPUs: ["0", "1"]
|
|
capabilities: [gpu]
|
|
|
|
mineru-gradio:
|
|
image: mineru:latest
|
|
container_name: mineru-gradio
|
|
restart: always
|
|
profiles: ["gradio"]
|
|
ports:
|
|
- 7860:7860
|
|
environment:
|
|
MINERU_MODEL_SOURCE: local
|
|
entrypoint: mineru-gradio
|
|
command:
|
|
# ==================== Gradio Server Configuration ====================
|
|
--server-name 0.0.0.0
|
|
--server-port 7860
|
|
|
|
# ==================== Gradio Feature Settings ====================
|
|
# --enable-api false # Disable API endpoint
|
|
# --max-convert-pages 20 # Limit conversion page count
|
|
|
|
# ==================== Engine Selection ====================
|
|
# WARNING: Only ONE engine can be enabled at a time!
|
|
|
|
# Option 1: vLLM Engine (recommended for most users)
|
|
--enable-vllm-engine true
|
|
# Multi-GPU configuration
|
|
# --data-parallel-size 2
|
|
# Single GPU memory optimization
|
|
# --gpu-memory-utilization 0.5 # Try 0.4 or lower if VRAM insufficient
|
|
|
|
# Option 2: LMDeploy Engine
|
|
# --enable-lmdeploy-engine true
|
|
# Multi-GPU configuration
|
|
# --dp 2
|
|
# Single GPU memory optimization
|
|
# --cache-max-entry-count 0.5 # Try 0.4 or lower if VRAM insufficient
|
|
ulimits:
|
|
memlock: -1
|
|
stack: 67108864
|
|
ipc: host
|
|
deploy:
|
|
resources:
|
|
reservations:
|
|
devices:
|
|
- driver: nvidia
|
|
device_ids: ["0"] # Modify for multiple GPUs: ["0", "1"]
|
|
capabilities: [gpu]
|