services:
  mineru-openai-server:
    image: mineru:latest
    container_name: mineru-openai-server
    restart: always
    profiles: ["openai-server"]
    ports:
      - 30000:30000
    environment:
      MINERU_MODEL_SOURCE: local
    entrypoint: mineru-openai-server
    command:
      --host 0.0.0.0
      --port 30000
      # --data-parallel-size 2  # On multi-GPU hosts, increase throughput with vllm's multi-GPU parallel mode
      # --gpu-memory-utilization 0.5  # On a single GPU with VRAM shortage, reduce the KV cache size with this parameter; if VRAM issues persist, lower it further to 0.4 or below
    ulimits:
      memlock: -1
      stack: 67108864
    ipc: host
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:30000/health || exit 1"]
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["0"]  # For multiple GPUs, list them all, e.g. ["0", "1"]
              capabilities: [gpu]

  mineru-api:
    image: mineru:latest
    container_name: mineru-api
    restart: always
    profiles: ["api"]
    ports:
      - 8000:8000
    environment:
      MINERU_MODEL_SOURCE: local
    entrypoint: mineru-api
    command:
      --host 0.0.0.0
      --port 8000
      # parameters for vllm-engine
      # --data-parallel-size 2  # On multi-GPU hosts, increase throughput with vllm's multi-GPU parallel mode
      # --gpu-memory-utilization 0.5  # On a single GPU with VRAM shortage, reduce the KV cache size with this parameter; if VRAM issues persist, lower it further to 0.4 or below
    ulimits:
      memlock: -1
      stack: 67108864
    ipc: host
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:8000/health || exit 1"]
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["0"]  # For multiple GPUs, list them all, e.g. ["0", "1"]
              capabilities: [gpu]

  mineru-gradio:
    image: mineru:latest
    container_name: mineru-gradio
    restart: always
    profiles: ["gradio"]
    ports:
      - 7860:7860
    environment:
      MINERU_MODEL_SOURCE: local
    entrypoint: mineru-gradio
    command:
      --server-name 0.0.0.0
      --server-port 7860
      # --enable-api false  # Set to false to disable the Gradio API
      # --max-convert-pages 20  # Limit the number of pages converted per document
      # parameters for vllm-engine
      # --data-parallel-size 2  # On multi-GPU hosts, increase throughput with vllm's multi-GPU parallel mode
      # --gpu-memory-utilization 0.5  # On a single GPU with VRAM shortage, reduce the KV cache size with this parameter; if VRAM issues persist, lower it further to 0.4 or below
    ulimits:
      memlock: -1
      stack: 67108864
    ipc: host
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["0"]  # For multiple GPUs, list them all, e.g. ["0", "1"]
              capabilities: [gpu]
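# Usage note (a minimal sketch; assumes the image has already been built and tagged mineru:latest):
# each service is gated behind a Compose profile, so only the services whose profile
# is activated with --profile are started.
#
#   docker compose --profile openai-server up -d   # OpenAI-compatible server on port 30000
#   docker compose --profile api up -d             # HTTP API on port 8000
#   docker compose --profile gradio up -d          # Gradio web UI on port 7860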