Merge pull request #24149 from doringeman/dmr-cli

vendor: github.com/docker/model-runner/cmd/cli v1.1.9-0.20260303081710-59280ed7abd5
This commit is contained in:
David Karlsson
2026-03-03 15:09:21 +01:00
committed by GitHub
39 changed files with 814 additions and 114 deletions

View File

@@ -1,8 +1,8 @@
# syntax=docker/dockerfile:1
# check=skip=InvalidBaseImagePlatform
ARG ALPINE_VERSION=3.21
ARG GO_VERSION=1.25
ARG ALPINE_VERSION=3.23
ARG GO_VERSION=1.26
ARG HTMLTEST_VERSION=0.17.0
ARG VALE_VERSION=3.11.2
ARG HUGO_VERSION=0.156.0

View File

@@ -10,6 +10,7 @@ cname:
- docker model df
- docker model inspect
- docker model install-runner
- docker model launch
- docker model list
- docker model logs
- docker model package
@@ -22,6 +23,9 @@ cname:
- docker model restart-runner
- docker model rm
- docker model run
- docker model search
- docker model show
- docker model skills
- docker model start-runner
- docker model status
- docker model stop-runner
@@ -34,6 +38,7 @@ clink:
- docker_model_df.yaml
- docker_model_inspect.yaml
- docker_model_install-runner.yaml
- docker_model_launch.yaml
- docker_model_list.yaml
- docker_model_logs.yaml
- docker_model_package.yaml
@@ -46,6 +51,9 @@ clink:
- docker_model_restart-runner.yaml
- docker_model_rm.yaml
- docker_model_run.yaml
- docker_model_search.yaml
- docker_model_show.yaml
- docker_model_skills.yaml
- docker_model_start-runner.yaml
- docker_model_status.yaml
- docker_model_stop-runner.yaml

View File

@@ -5,7 +5,7 @@ long: |-
This command runs a series of benchmarks with 1, 2, 4, and 8 concurrent requests by default,
measuring the tokens per second (TPS) that the model can generate.
usage: docker model bench [MODEL]
usage: docker model bench MODEL
pname: docker model
plink: docker_model.yaml
options:

View File

@@ -33,6 +33,15 @@ options:
experimentalcli: false
kubernetes: false
swarm: false
- option: runtime-flags
value_type: string
description: raw runtime flags to pass to the inference engine
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: speculative-draft-model
value_type: string
description: draft model for speculative decoding

View File

@@ -1,13 +1,17 @@
command: docker model configure
short: Configure runtime options for a model
long: Configure runtime options for a model
usage: docker model configure [--context-size=<n>] [--speculative-draft-model=<model>] [--hf_overrides=<json>] [--reasoning-budget=<n>] MODEL
aliases: docker model configure, docker model config
short: Manage model runtime configurations
long: Manage model runtime configurations
usage: docker model configure [--context-size=<n>] [--speculative-draft-model=<model>] [--hf_overrides=<json>] [--gpu-memory-utilization=<float>] [--mode=<mode>] [--think] [--keep-alive=<duration>] MODEL [-- <runtime-flags...>]
pname: docker model
plink: docker_model.yaml
cname:
- docker model configure show
clink:
- docker_model_configure_show.yaml
options:
- option: context-size
value_type: int64
default_value: "-1"
value_type: int32
description: context size (in tokens)
deprecated: false
hidden: false
@@ -15,6 +19,16 @@ options:
experimentalcli: false
kubernetes: false
swarm: false
- option: gpu-memory-utilization
value_type: float64
description: |
fraction of GPU memory to use for the model executor (0.0-1.0) - vLLM only
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: hf_overrides
value_type: string
description: HuggingFace model config overrides (JSON) - vLLM only
@@ -24,10 +38,20 @@ options:
experimentalcli: false
kubernetes: false
swarm: false
- option: reasoning-budget
value_type: int64
default_value: "0"
description: reasoning budget for reasoning models - llama.cpp only
- option: keep-alive
value_type: string
description: |
duration to keep model loaded (e.g., '5m', '1h', '0' to unload immediately, '-1' to never unload)
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: mode
value_type: string
description: |
backend operation mode (completion, embedding, reranking, image-generation)
deprecated: false
hidden: false
experimental: false
@@ -63,6 +87,15 @@ options:
experimentalcli: false
kubernetes: false
swarm: false
- option: think
value_type: bool
description: enable reasoning mode for thinking models
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
hidden: true
experimental: false

View File

@@ -0,0 +1,13 @@
command: docker model configure show
short: Show model configurations
long: Show model configurations
usage: docker model configure show [MODEL]
pname: docker model configure
plink: docker_model_configure.yaml
deprecated: false
hidden: true
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

View File

@@ -8,7 +8,7 @@ plink: docker_model.yaml
options:
- option: backend
value_type: string
description: 'Specify backend (llama.cpp|vllm). Default: llama.cpp'
description: 'Specify backend (llama.cpp|vllm|diffusers). Default: llama.cpp'
deprecated: false
hidden: false
experimental: false
@@ -66,6 +66,54 @@ options:
experimentalcli: false
kubernetes: false
swarm: false
- option: proxy-cert
value_type: string
description: Path to a CA certificate file for proxy SSL inspection
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: tls
value_type: bool
default_value: "false"
description: Enable TLS/HTTPS for Docker Model Runner API
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: tls-cert
value_type: string
description: Path to TLS certificate file (auto-generated if not provided)
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: tls-key
value_type: string
description: Path to TLS private key file (auto-generated if not provided)
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: tls-port
value_type: uint16
default_value: "0"
description: |
TLS port for Docker Model Runner (default: 12444 for Docker Engine, 12445 for Cloud mode)
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
hidden: false
experimental: false

View File

@@ -0,0 +1,84 @@
command: docker model launch
short: Launch an app configured to use Docker Model Runner
long: |-
Launch an app configured to use Docker Model Runner.
Without arguments, lists all supported apps.
Supported apps: anythingllm, claude, codex, openclaw, opencode, openwebui
Examples:
docker model launch
docker model launch opencode
docker model launch claude -- --help
docker model launch openwebui --port 3000
docker model launch claude --config
usage: docker model launch [APP] [-- APP_ARGS...]
pname: docker model
plink: docker_model.yaml
options:
- option: config
value_type: bool
default_value: "false"
description: Print configuration without launching
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: detach
value_type: bool
default_value: "false"
description: Run containerized app in background
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: dry-run
value_type: bool
default_value: "false"
description: Print what would be executed without running it
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: image
value_type: string
description: Override container image for containerized apps
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: model
value_type: string
description: Model to use (for opencode)
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: port
value_type: int
default_value: "0"
description: Host port to expose (web UIs)
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

View File

@@ -2,7 +2,7 @@ command: docker model list
aliases: docker model list, docker model ls
short: List the models pulled to your local environment
long: List the models pulled to your local environment
usage: docker model list [OPTIONS]
usage: docker model list [OPTIONS] [MODEL]
pname: docker model
plink: docker_model.yaml
options:
@@ -26,6 +26,15 @@ options:
experimentalcli: false
kubernetes: false
swarm: false
- option: openaiurl
value_type: string
description: OpenAI-compatible API endpoint URL to list models from
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: quiet
shorthand: q
value_type: bool

View File

@@ -1,12 +1,45 @@
command: docker model package
short: |
Package a GGUF file, Safetensors directory, or existing model into a Docker model OCI artifact.
short: Package a model into a Docker Model OCI artifact
long: |-
Package a GGUF file, Safetensors directory, or existing model into a Docker model OCI artifact, with optional licenses. The package is sent to the model-runner, unless --push is specified.
When packaging a sharded GGUF model, --gguf should point to the first shard. All shard files should be siblings and should include the index in the file name (e.g. model-00001-of-00015.gguf).
When packaging a Safetensors model, --safetensors-dir should point to a directory containing .safetensors files and config files (*.json, merges.txt). All files will be auto-discovered and config files will be packaged into a tar archive.
When packaging from an existing model using --from, you can modify properties like context size to create a variant of the original model.
usage: docker model package (--gguf <path> | --safetensors-dir <path> | --from <model>) [--license <path>...] [--context-size <tokens>] [--push] MODEL
Package a model into a Docker Model OCI artifact.
The model source must be one of:
--gguf A GGUF file (single file or first shard of a sharded model)
--safetensors-dir A directory containing .safetensors and configuration files
--dduf A .dduf (Diffusers Unified Format) archive
--from An existing packaged model reference
By default, the packaged artifact is loaded into the local Model Runner content store.
Use --push to publish the model to a registry instead.
MODEL specifies the target model reference (for example: myorg/llama3:8b).
When using --push, MODEL must be a registry-qualified reference.
Packaging behavior:
GGUF
--gguf must point to a .gguf file.
For sharded models, point to the first shard. All shards must:
• reside in the same directory
• follow an indexed naming convention (e.g. model-00001-of-00015.gguf)
All shards are automatically discovered and packaged together.
Safetensors
--safetensors-dir must point to a directory containing .safetensors files
and required configuration files (e.g. model config, tokenizer files).
All files under the directory (including nested subdirectories) are
automatically discovered. Each file is packaged as a separate OCI layer.
DDUF
--dduf must point to a .dduf archive file.
Repackaging
--from repackages an existing model. You may override selected properties
such as --context-size to create a variant of the original model.
Multimodal models
Use --mmproj to include a multimodal projector file.
usage: docker model package (--gguf <path> | --safetensors-dir <path> | --dduf <path> | --from <model>) [--license <path>...] [--mmproj <path>] [--context-size <tokens>] [--push] MODEL
pname: docker model
plink: docker_model.yaml
options:
@@ -29,11 +62,9 @@ options:
experimentalcli: false
kubernetes: false
swarm: false
- option: dir-tar
value_type: stringArray
default_value: '[]'
description: |
relative path to directory to package as tar (can be specified multiple times)
- option: dduf
value_type: string
description: absolute path to DDUF archive file (Diffusers Unified Format)
deprecated: false
hidden: false
experimental: false
@@ -69,6 +100,15 @@ options:
experimentalcli: false
kubernetes: false
swarm: false
- option: mmproj
value_type: string
description: absolute path to multimodal projector file
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: push
value_type: bool
default_value: "false"

View File

@@ -5,18 +5,6 @@ long: |
usage: docker model pull MODEL
pname: docker model
plink: docker_model.yaml
options:
- option: ignore-runtime-memory-check
value_type: bool
default_value: "false"
description: |
Do not block pull if estimated runtime memory for model exceeds system resources.
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
examples: |-
### Pulling a model from Docker Hub

View File

@@ -1,6 +1,6 @@
command: docker model push
short: Push a model to Docker Hub
long: Push a model to Docker Hub
short: Push a model to Docker Hub or Hugging Face
long: Push a model to Docker Hub or Hugging Face
usage: docker model push MODEL
pname: docker model
plink: docker_model.yaml

View File

@@ -8,7 +8,7 @@ plink: docker_model.yaml
options:
- option: backend
value_type: string
description: 'Specify backend (llama.cpp|vllm). Default: llama.cpp'
description: 'Specify backend (llama.cpp|vllm|diffusers). Default: llama.cpp'
deprecated: false
hidden: false
experimental: false
@@ -66,6 +66,54 @@ options:
experimentalcli: false
kubernetes: false
swarm: false
- option: proxy-cert
value_type: string
description: Path to a CA certificate file for proxy SSL inspection
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: tls
value_type: bool
default_value: "false"
description: Enable TLS/HTTPS for Docker Model Runner API
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: tls-cert
value_type: string
description: Path to TLS certificate file (auto-generated if not provided)
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: tls-key
value_type: string
description: Path to TLS private key file (auto-generated if not provided)
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: tls-port
value_type: uint16
default_value: "0"
description: |
TLS port for Docker Model Runner (default: 12444 for Docker Engine, 12445 for Cloud mode)
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
hidden: false
experimental: false

View File

@@ -59,6 +59,15 @@ options:
experimentalcli: false
kubernetes: false
swarm: false
- option: proxy-cert
value_type: string
description: Path to a CA certificate file for proxy SSL inspection
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
hidden: false
experimental: false

View File

@@ -41,11 +41,9 @@ options:
experimentalcli: false
kubernetes: false
swarm: false
- option: ignore-runtime-memory-check
value_type: bool
default_value: "false"
description: |
Do not block pull if estimated runtime memory for model exceeds system resources.
- option: openaiurl
value_type: string
description: OpenAI-compatible API endpoint URL to chat with
deprecated: false
hidden: false
experimental: false

View File

@@ -0,0 +1,57 @@
command: docker model search
short: Search for models on Docker Hub and HuggingFace
long: |-
Search for models from Docker Hub (ai/ namespace) and HuggingFace.
When no search term is provided, lists all available models.
When a search term is provided, filters models by name/description.
Examples:
docker model search # List available models from Docker Hub
docker model search llama # Search for models containing "llama"
docker model search --source=all # Search both Docker Hub and HuggingFace
docker model search --source=huggingface # Only search HuggingFace
docker model search --limit=50 phi # Search with custom limit
docker model search --json llama # Output as JSON
usage: docker model search [OPTIONS] [TERM]
pname: docker model
plink: docker_model.yaml
options:
- option: json
value_type: bool
default_value: "false"
description: Output results as JSON
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: limit
shorthand: "n"
value_type: int
default_value: "32"
description: Maximum number of results to show
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: source
value_type: string
default_value: all
description: 'Source to search: all, dockerhub, huggingface'
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

View File

@@ -0,0 +1,25 @@
command: docker model show
short: Show information for a model
long: Display detailed information about a model in a human-readable format.
usage: docker model show MODEL
pname: docker model
plink: docker_model.yaml
options:
- option: remote
shorthand: r
value_type: bool
default_value: "false"
description: Show info for remote models
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

View File

@@ -0,0 +1,79 @@
command: docker model skills
short: Install Docker Model Runner skills for AI coding assistants
long: |-
Install Docker Model Runner skills for AI coding assistants.
Skills are configuration files that help AI coding assistants understand
how to use Docker Model Runner effectively for local model inference.
Supported targets:
--codex Install to ~/.codex/skills (OpenAI Codex CLI)
--claude Install to ~/.claude/skills (Claude Code)
--opencode Install to ~/.config/opencode/skills (OpenCode)
--dest Install to a custom directory
Example:
docker model skills --claude
docker model skills --codex --claude
docker model skills --dest /path/to/skills
usage: docker model skills
pname: docker model
plink: docker_model.yaml
options:
- option: claude
value_type: bool
default_value: "false"
description: Install skills for Claude Code (~/.claude/skills)
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: codex
value_type: bool
default_value: "false"
description: Install skills for OpenAI Codex CLI (~/.codex/skills)
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: dest
value_type: string
description: Install skills to a custom directory
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: force
shorthand: f
value_type: bool
default_value: "false"
description: Overwrite existing skills without prompting
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: opencode
value_type: bool
default_value: "false"
description: Install skills for OpenCode (~/.config/opencode/skills)
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false

View File

@@ -10,7 +10,7 @@ plink: docker_model.yaml
options:
- option: backend
value_type: string
description: 'Specify backend (llama.cpp|vllm). Default: llama.cpp'
description: 'Specify backend (llama.cpp|vllm|diffusers). Default: llama.cpp'
deprecated: false
hidden: false
experimental: false
@@ -47,6 +47,16 @@ options:
experimentalcli: false
kubernetes: false
swarm: false
- option: host
value_type: string
default_value: 127.0.0.1
description: Host address to bind Docker Model Runner
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: port
value_type: uint16
default_value: "0"
@@ -58,6 +68,54 @@ options:
experimentalcli: false
kubernetes: false
swarm: false
- option: proxy-cert
value_type: string
description: Path to a CA certificate file for proxy SSL inspection
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: tls
value_type: bool
default_value: "false"
description: Enable TLS/HTTPS for Docker Model Runner API
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: tls-cert
value_type: string
description: Path to TLS certificate file (auto-generated if not provided)
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: tls-key
value_type: string
description: Path to TLS private key file (auto-generated if not provided)
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: tls-port
value_type: uint16
default_value: "0"
description: |
TLS port for Docker Model Runner (default: 12444 for Docker Engine, 12445 for Cloud mode)
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
deprecated: false
hidden: false
experimental: false

View File

@@ -5,31 +5,35 @@ Docker Model Runner
### Subcommands
| Name | Description |
|:------------------------------------------------|:------------------------------------------------------------------------------------------------|
| [`bench`](model_bench.md) | Benchmark a model's performance at different concurrency levels |
| [`df`](model_df.md) | Show Docker Model Runner disk usage |
| [`inspect`](model_inspect.md) | Display detailed information on one model |
| [`install-runner`](model_install-runner.md) | Install Docker Model Runner (Docker Engine only) |
| [`list`](model_list.md) | List the models pulled to your local environment |
| [`logs`](model_logs.md) | Fetch the Docker Model Runner logs |
| [`package`](model_package.md) | Package a GGUF file, Safetensors directory, or existing model into a Docker model OCI artifact. |
| [`ps`](model_ps.md) | List running models |
| [`pull`](model_pull.md) | Pull a model from Docker Hub or HuggingFace to your local environment |
| [`purge`](model_purge.md) | Remove all models |
| [`push`](model_push.md) | Push a model to Docker Hub |
| [`reinstall-runner`](model_reinstall-runner.md) | Reinstall Docker Model Runner (Docker Engine only) |
| [`requests`](model_requests.md) | Fetch requests+responses from Docker Model Runner |
| [`restart-runner`](model_restart-runner.md) | Restart Docker Model Runner (Docker Engine only) |
| [`rm`](model_rm.md) | Remove local models downloaded from Docker Hub |
| [`run`](model_run.md) | Run a model and interact with it using a submitted prompt or chat mode |
| [`start-runner`](model_start-runner.md) | Start Docker Model Runner (Docker Engine only) |
| [`status`](model_status.md) | Check if the Docker Model Runner is running |
| [`stop-runner`](model_stop-runner.md) | Stop Docker Model Runner (Docker Engine only) |
| [`tag`](model_tag.md) | Tag a model |
| [`uninstall-runner`](model_uninstall-runner.md) | Uninstall Docker Model Runner (Docker Engine only) |
| [`unload`](model_unload.md) | Unload running models |
| [`version`](model_version.md) | Show the Docker Model Runner version |
| Name | Description |
|:------------------------------------------------|:-----------------------------------------------------------------------|
| [`bench`](model_bench.md) | Benchmark a model's performance at different concurrency levels |
| [`df`](model_df.md) | Show Docker Model Runner disk usage |
| [`inspect`](model_inspect.md) | Display detailed information on one model |
| [`install-runner`](model_install-runner.md) | Install Docker Model Runner (Docker Engine only) |
| [`launch`](model_launch.md) | Launch an app configured to use Docker Model Runner |
| [`list`](model_list.md) | List the models pulled to your local environment |
| [`logs`](model_logs.md) | Fetch the Docker Model Runner logs |
| [`package`](model_package.md) | Package a model into a Docker Model OCI artifact |
| [`ps`](model_ps.md) | List running models |
| [`pull`](model_pull.md) | Pull a model from Docker Hub or HuggingFace to your local environment |
| [`purge`](model_purge.md) | Remove all models |
| [`push`](model_push.md) | Push a model to Docker Hub or Hugging Face |
| [`reinstall-runner`](model_reinstall-runner.md) | Reinstall Docker Model Runner (Docker Engine only) |
| [`requests`](model_requests.md) | Fetch requests+responses from Docker Model Runner |
| [`restart-runner`](model_restart-runner.md) | Restart Docker Model Runner (Docker Engine only) |
| [`rm`](model_rm.md) | Remove local models downloaded from Docker Hub |
| [`run`](model_run.md) | Run a model and interact with it using a submitted prompt or chat mode |
| [`search`](model_search.md) | Search for models on Docker Hub and HuggingFace |
| [`show`](model_show.md) | Show information for a model |
| [`skills`](model_skills.md) | Install Docker Model Runner skills for AI coding assistants |
| [`start-runner`](model_start-runner.md) | Start Docker Model Runner (Docker Engine only) |
| [`status`](model_status.md) | Check if the Docker Model Runner is running |
| [`stop-runner`](model_stop-runner.md) | Stop Docker Model Runner (Docker Engine only) |
| [`tag`](model_tag.md) | Tag a model |
| [`uninstall-runner`](model_uninstall-runner.md) | Uninstall Docker Model Runner (Docker Engine only) |
| [`unload`](model_unload.md) | Unload running models |
| [`version`](model_version.md) | Show the Docker Model Runner version |

View File

@@ -7,12 +7,17 @@ Install Docker Model Runner (Docker Engine only)
| Name | Type | Default | Description |
|:-----------------|:---------|:------------|:-------------------------------------------------------------------------------------------------------|
| `--backend` | `string` | | Specify backend (llama.cpp\|vllm). Default: llama.cpp |
| `--backend` | `string` | | Specify backend (llama.cpp\|vllm\|diffusers). Default: llama.cpp |
| `--debug` | `bool` | | Enable debug logging |
| `--do-not-track` | `bool` | | Do not track model usage in Docker Model Runner |
| `--gpu` | `string` | `auto` | Specify GPU support (none\|auto\|cuda\|rocm\|musa\|cann) |
| `--host` | `string` | `127.0.0.1` | Host address to bind Docker Model Runner |
| `--port` | `uint16` | `0` | Docker container port for Docker Model Runner (default: 12434 for Docker Engine, 12435 for Cloud mode) |
| `--proxy-cert` | `string` | | Path to a CA certificate file for proxy SSL inspection |
| `--tls` | `bool` | | Enable TLS/HTTPS for Docker Model Runner API |
| `--tls-cert` | `string` | | Path to TLS certificate file (auto-generated if not provided) |
| `--tls-key` | `string` | | Path to TLS private key file (auto-generated if not provided) |
| `--tls-port` | `uint16` | `0` | TLS port for Docker Model Runner (default: 12444 for Docker Engine, 12445 for Cloud mode) |
<!---MARKER_GEN_END-->

View File

@@ -0,0 +1,30 @@
# docker model launch
<!---MARKER_GEN_START-->
Launch an app configured to use Docker Model Runner.
Without arguments, lists all supported apps.
Supported apps: anythingllm, claude, codex, openclaw, opencode, openwebui
Examples:
docker model launch
docker model launch opencode
docker model launch claude -- --help
docker model launch openwebui --port 3000
docker model launch claude --config
### Options
| Name | Type | Default | Description |
|:------------|:---------|:--------|:------------------------------------------------|
| `--config` | `bool` | | Print configuration without launching |
| `--detach` | `bool` | | Run containerized app in background |
| `--dry-run` | `bool` | | Print what would be executed without running it |
| `--image` | `string` | | Override container image for containerized apps |
| `--model` | `string` | | Model to use (for opencode) |
| `--port` | `int` | `0` | Host port to expose (web UIs) |
<!---MARKER_GEN_END-->

View File

@@ -9,11 +9,12 @@ List the models pulled to your local environment
### Options
| Name | Type | Default | Description |
|:----------------|:-------|:--------|:--------------------------------|
| `--json` | `bool` | | List models in a JSON format |
| `--openai` | `bool` | | List models in an OpenAI format |
| `-q`, `--quiet` | `bool` | | Only show model IDs |
| Name | Type | Default | Description |
|:----------------|:---------|:--------|:-------------------------------------------------------|
| `--json` | `bool` | | List models in a JSON format |
| `--openai` | `bool` | | List models in an OpenAI format |
| `--openaiurl` | `string` | | OpenAI-compatible API endpoint URL to list models from |
| `-q`, `--quiet` | `bool` | | Only show model IDs |
<!---MARKER_GEN_END-->

View File

@@ -1,10 +1,44 @@
# docker model package
<!---MARKER_GEN_START-->
Package a GGUF file, Safetensors directory, or existing model into a Docker model OCI artifact, with optional licenses. The package is sent to the model-runner, unless --push is specified.
When packaging a sharded GGUF model, --gguf should point to the first shard. All shard files should be siblings and should include the index in the file name (e.g. model-00001-of-00015.gguf).
When packaging a Safetensors model, --safetensors-dir should point to a directory containing .safetensors files and config files (*.json, merges.txt). All files will be auto-discovered and config files will be packaged into a tar archive.
When packaging from an existing model using --from, you can modify properties like context size to create a variant of the original model.
Package a model into a Docker Model OCI artifact.
The model source must be one of:
--gguf A GGUF file (single file or first shard of a sharded model)
--safetensors-dir A directory containing .safetensors and configuration files
--dduf A .dduf (Diffusers Unified Format) archive
--from An existing packaged model reference
By default, the packaged artifact is loaded into the local Model Runner content store.
Use --push to publish the model to a registry instead.
MODEL specifies the target model reference (for example: myorg/llama3:8b).
When using --push, MODEL must be a registry-qualified reference.
Packaging behavior:
GGUF
--gguf must point to a .gguf file.
For sharded models, point to the first shard. All shards must:
• reside in the same directory
• follow an indexed naming convention (e.g. model-00001-of-00015.gguf)
All shards are automatically discovered and packaged together.
Safetensors
--safetensors-dir must point to a directory containing .safetensors files
and required configuration files (e.g. model config, tokenizer files).
All files under the directory (including nested subdirectories) are
automatically discovered. Each file is packaged as a separate OCI layer.
DDUF
--dduf must point to a .dduf archive file.
Repackaging
--from repackages an existing model. You may override selected properties
such as --context-size to create a variant of the original model.
Multimodal models
Use --mmproj to include a multimodal projector file.
### Options
@@ -12,10 +46,11 @@ When packaging from an existing model using --from, you can modify properties li
|:--------------------|:--------------|:--------|:---------------------------------------------------------------------------------------|
| `--chat-template` | `string` | | absolute path to chat template file (must be Jinja format) |
| `--context-size` | `uint64` | `0` | context size in tokens |
| `--dir-tar` | `stringArray` | | relative path to directory to package as tar (can be specified multiple times) |
| `--dduf` | `string` | | absolute path to DDUF archive file (Diffusers Unified Format) |
| `--from` | `string` | | reference to an existing model to repackage |
| `--gguf` | `string` | | absolute path to gguf file |
| `-l`, `--license` | `stringArray` | | absolute path to a license file |
| `--mmproj` | `string` | | absolute path to multimodal projector file |
| `--push` | `bool` | | push to registry (if not set, the model is loaded into the Model Runner content store) |
| `--safetensors-dir` | `string` | | absolute path to directory containing safetensors files and config |

View File

@@ -3,12 +3,6 @@
<!---MARKER_GEN_START-->
Pull a model from Docker Hub or HuggingFace to your local environment
### Options
| Name | Type | Default | Description |
|:--------------------------------|:-------|:--------|:----------------------------------------------------------------------------------|
| `--ignore-runtime-memory-check` | `bool` | | Do not block pull if estimated runtime memory for model exceeds system resources. |
<!---MARKER_GEN_END-->

View File

@@ -1,7 +1,7 @@
# docker model push
<!---MARKER_GEN_START-->
Push a model to Docker Hub
Push a model to Docker Hub or Hugging Face
<!---MARKER_GEN_END-->

View File

@@ -7,12 +7,17 @@ Reinstall Docker Model Runner (Docker Engine only)
| Name | Type | Default | Description |
|:-----------------|:---------|:------------|:-------------------------------------------------------------------------------------------------------|
| `--backend` | `string` | | Specify backend (llama.cpp\|vllm). Default: llama.cpp |
| `--backend` | `string` | | Specify backend (llama.cpp\|vllm\|diffusers). Default: llama.cpp |
| `--debug` | `bool` | | Enable debug logging |
| `--do-not-track` | `bool` | | Do not track models usage in Docker Model Runner |
| `--gpu` | `string` | `auto` | Specify GPU support (none\|auto\|cuda\|rocm\|musa\|cann) |
| `--host` | `string` | `127.0.0.1` | Host address to bind Docker Model Runner |
| `--port` | `uint16` | `0` | Docker container port for Docker Model Runner (default: 12434 for Docker Engine, 12435 for Cloud mode) |
| `--proxy-cert` | `string` | | Path to a CA certificate file for proxy SSL inspection |
| `--tls` | `bool` | | Enable TLS/HTTPS for Docker Model Runner API |
| `--tls-cert` | `string` | | Path to TLS certificate file (auto-generated if not provided) |
| `--tls-key` | `string` | | Path to TLS private key file (auto-generated if not provided) |
| `--tls-port` | `uint16` | `0` | TLS port for Docker Model Runner (default: 12444 for Docker Engine, 12445 for Cloud mode) |
<!---MARKER_GEN_END-->

View File

@@ -12,6 +12,7 @@ Restart Docker Model Runner (Docker Engine only)
| `--gpu` | `string` | `auto` | Specify GPU support (none\|auto\|cuda\|rocm\|musa\|cann) |
| `--host` | `string` | `127.0.0.1` | Host address to bind Docker Model Runner |
| `--port` | `uint16` | `0` | Docker container port for Docker Model Runner (default: 12434 for Docker Engine, 12435 for Cloud mode) |
| `--proxy-cert` | `string` | | Path to a CA certificate file for proxy SSL inspection |
<!---MARKER_GEN_END-->

View File

@@ -5,12 +5,12 @@ Run a model and interact with it using a submitted prompt or chat mode
### Options
| Name | Type | Default | Description |
|:--------------------------------|:---------|:--------|:----------------------------------------------------------------------------------|
| `--color` | `string` | `no` | Use colored output (auto\|yes\|no) |
| `--debug` | `bool` | | Enable debug logging |
| `-d`, `--detach` | `bool` | | Load the model in the background without interaction |
| `--ignore-runtime-memory-check` | `bool` | | Do not block pull if estimated runtime memory for model exceeds system resources. |
| Name | Type | Default | Description |
|:-----------------|:---------|:--------|:-----------------------------------------------------|
| `--color` | `string` | `no` | Use colored output (auto\|yes\|no) |
| `--debug` | `bool` | | Enable debug logging |
| `-d`, `--detach` | `bool` | | Load the model in the background without interaction |
| `--openaiurl` | `string` | | OpenAI-compatible API endpoint URL to chat with |
<!---MARKER_GEN_END-->

View File

@@ -0,0 +1,27 @@
# docker model search
<!---MARKER_GEN_START-->
Search for models from Docker Hub (ai/ namespace) and HuggingFace.
When no search term is provided, lists all available models.
When a search term is provided, filters models by name/description.
Examples:
docker model search # List available models from Docker Hub
docker model search llama # Search for models containing "llama"
docker model search --source=all # Search both Docker Hub and HuggingFace
docker model search --source=huggingface # Only search HuggingFace
docker model search --limit=50 phi # Search with custom limit
docker model search --json llama # Output as JSON
### Options
| Name | Type | Default | Description |
|:----------------|:---------|:--------|:----------------------------------------------|
| `--json` | `bool` | | Output results as JSON |
| `-n`, `--limit` | `int` | `32` | Maximum number of results to show |
| `--source` | `string` | `all` | Source to search: all, dockerhub, huggingface |
<!---MARKER_GEN_END-->

View File

@@ -0,0 +1,14 @@
# docker model show
<!---MARKER_GEN_START-->
Display detailed information about a model in a human-readable format.
### Options
| Name | Type | Default | Description |
|:-----------------|:-------|:--------|:----------------------------|
| `-r`, `--remote` | `bool` | | Show info for remote models |
<!---MARKER_GEN_END-->

View File

@@ -0,0 +1,32 @@
# docker model skills
<!---MARKER_GEN_START-->
Install Docker Model Runner skills for AI coding assistants.
Skills are configuration files that help AI coding assistants understand
how to use Docker Model Runner effectively for local model inference.
Supported targets:
--codex Install to ~/.codex/skills (OpenAI Codex CLI)
--claude Install to ~/.claude/skills (Claude Code)
--opencode Install to ~/.config/opencode/skills (OpenCode)
--dest Install to a custom directory
Example:
docker model skills --claude
docker model skills --codex --claude
docker model skills --dest /path/to/skills
### Options
| Name | Type | Default | Description |
|:----------------|:---------|:--------|:--------------------------------------------------------|
| `--claude` | `bool` | | Install skills for Claude Code (~/.claude/skills) |
| `--codex` | `bool` | | Install skills for OpenAI Codex CLI (~/.codex/skills) |
| `--dest` | `string` | | Install skills to a custom directory |
| `-f`, `--force` | `bool` | | Overwrite existing skills without prompting |
| `--opencode` | `bool` | | Install skills for OpenCode (~/.config/opencode/skills) |
<!---MARKER_GEN_END-->

View File

@@ -5,13 +5,19 @@ Start Docker Model Runner (Docker Engine only)
### Options
| Name | Type | Default | Description |
|:-----------------|:---------|:--------|:-------------------------------------------------------------------------------------------------------|
| `--backend` | `string` | | Specify backend (llama.cpp\|vllm). Default: llama.cpp |
| `--debug` | `bool` | | Enable debug logging |
| `--do-not-track` | `bool` | | Do not track models usage in Docker Model Runner |
| `--gpu` | `string` | `auto` | Specify GPU support (none\|auto\|cuda\|rocm\|musa\|cann) |
| `--port` | `uint16` | `0` | Docker container port for Docker Model Runner (default: 12434 for Docker Engine, 12435 for Cloud mode) |
| Name | Type | Default | Description |
|:-----------------|:---------|:------------|:-------------------------------------------------------------------------------------------------------|
| `--backend` | `string` | | Specify backend (llama.cpp\|vllm\|diffusers). Default: llama.cpp |
| `--debug` | `bool` | | Enable debug logging |
| `--do-not-track` | `bool` | | Do not track models usage in Docker Model Runner |
| `--gpu` | `string` | `auto` | Specify GPU support (none\|auto\|cuda\|rocm\|musa\|cann) |
| `--host` | `string` | `127.0.0.1` | Host address to bind Docker Model Runner |
| `--port` | `uint16` | `0` | Docker container port for Docker Model Runner (default: 12434 for Docker Engine, 12435 for Cloud mode) |
| `--proxy-cert` | `string` | | Path to a CA certificate file for proxy SSL inspection |
| `--tls` | `bool` | | Enable TLS/HTTPS for Docker Model Runner API |
| `--tls-cert` | `string` | | Path to TLS certificate file (auto-generated if not provided) |
| `--tls-key` | `string` | | Path to TLS private key file (auto-generated if not provided) |
| `--tls-port` | `uint16` | `0` | TLS port for Docker Model Runner (default: 12444 for Docker Engine, 12445 for Cloud mode) |
<!---MARKER_GEN_END-->

4
_vendor/modules.txt generated
View File

@@ -1,6 +1,6 @@
# github.com/moby/moby/api v1.53.0
# github.com/moby/buildkit v0.27.0
# github.com/docker/buildx v0.31.1
# github.com/docker/cli v29.2.0+incompatible
# github.com/docker/cli v29.2.1+incompatible
# github.com/docker/compose/v5 v5.0.2
# github.com/docker/model-runner/cmd/cli v1.0.3
# github.com/docker/model-runner v1.1.9-0.20260303081710-59280ed7abd5

8
go.mod
View File

@@ -1,6 +1,6 @@
module github.com/docker/docs
go 1.25.0
go 1.26.0
// This go.mod file is used by hugo to vendor documentation from upstream
// reposities. Use the "require" section to specify the version of the
@@ -9,9 +9,9 @@ go 1.25.0
// Make sure to add an entry in the "tools" section when adding a new repository.
require (
github.com/docker/buildx v0.31.1
github.com/docker/cli v29.2.0+incompatible
github.com/docker/cli v29.2.1+incompatible
github.com/docker/compose/v5 v5.0.2
github.com/docker/model-runner/cmd/cli v1.0.3
github.com/docker/model-runner v1.1.9-0.20260303081710-59280ed7abd5
github.com/moby/buildkit v0.27.0
github.com/moby/moby/api v1.53.0
)
@@ -20,7 +20,7 @@ tool (
github.com/docker/buildx
github.com/docker/cli
github.com/docker/compose/v5
github.com/docker/model-runner/cmd/cli
github.com/docker/model-runner
github.com/docker/scout-cli
github.com/moby/buildkit
github.com/moby/moby/api

40
go.sum
View File

@@ -10,6 +10,8 @@ github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuP
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs=
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk=
github.com/charmbracelet/glamour v0.10.0 h1:MtZvfwsYCx8jEPFJm3rIBFIMZUfUJ765oX8V6kXldcY=
@@ -60,6 +62,8 @@ github.com/docker/cli v29.1.5+incompatible h1:GckbANUt3j+lsnQ6eCcQd70mNSOismSHWt
github.com/docker/cli v29.1.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v29.2.0+incompatible h1:9oBd9+YM7rxjZLfyMGxjraKBKE4/nVyvVfN4qNl9XRM=
github.com/docker/cli v29.2.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v29.2.1+incompatible h1:n3Jt0QVCN65eiVBoUTZQM9mcQICCJt3akW4pKAbKdJg=
github.com/docker/cli v29.2.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/compose/v5 v5.0.0 h1:J2uMCzJ/5xLcoIVVXvMmPe6HBzVQpmJThKa7Qk7Xldc=
github.com/docker/compose/v5 v5.0.0/go.mod h1:BurapGv8zmYnsbSmlpCz5EU2Pi3YFV/PjeUnoFpcw64=
github.com/docker/compose/v5 v5.0.1 h1:5yCjDJbwUqcuI+6WNFHNWz2/3vyBDsNnfe8LlFjyxEc=
@@ -82,6 +86,10 @@ github.com/docker/mcp-gateway v0.22.0 h1:l4t+HRNHxR7Jn545KDeXaeiEEhkCDBeWMTyuCaX
github.com/docker/mcp-gateway v0.22.0/go.mod h1:4sGoRoMeu8lj4a/HbAd/jdxXjMNVMegNnjCqmhsED+I=
github.com/docker/model-runner v1.0.3 h1:04qSAOsFvxOYIejMOc2XGug+WQSTax0RO0OMA7DkwTU=
github.com/docker/model-runner v1.0.3/go.mod h1:qRIuXMeZ5dnL4A9e/+BUtQOSZAS0PBxUkLN51XYpLOE=
github.com/docker/model-runner v1.1.8 h1:dAdrvz5oHxnWToM732NOtbwK5hBSUQpX87JQ6AQPDUE=
github.com/docker/model-runner v1.1.8/go.mod h1:BUkuBVgWQhMfN6rY+1KesorT9lwG2D0xCNY8Co9xKCI=
github.com/docker/model-runner v1.1.9-0.20260303081710-59280ed7abd5 h1:oY1RUGY7sDj5DUjqlxWAIJR4olBp3298iL8mWYFjXFg=
github.com/docker/model-runner v1.1.9-0.20260303081710-59280ed7abd5/go.mod h1:S1ttvsnofZx35unrXuzhcbZ7fWOpNhLPawhNrRMHp+c=
github.com/docker/model-runner/cmd/cli v1.0.3 h1:oycm6fHwhFBNM47Y2Ka7nUSsrbSG3FL7NMShjTRuxak=
github.com/docker/model-runner/cmd/cli v1.0.3/go.mod h1:86LCLsk93vuevYRDKoBxwGusyGlW+UnKCnbXJ7m6Zjo=
github.com/docker/model-runner/pkg/go-containerregistry v0.0.0-20251121150728-6951a2a36575 h1:N2yLWYSZFTVLkLTh8ux1Z0Nug/F78pXsl2KDtbWhe+Y=
@@ -120,16 +128,21 @@ github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
github.com/gpustack/gguf-parser-go v0.22.1 h1:FRnEDWqT0Rcplr/R9ctCRSN2+3DhVsf6dnR5/i9JA4E=
github.com/gpustack/gguf-parser-go v0.22.1/go.mod h1:y4TwTtDqFWTK+xvprOjRUh+dowgU2TKCX37vRKvGiZ0=
github.com/gpustack/gguf-parser-go v0.24.0 h1:tdJceXYp9e5RhE9RwVYIuUpir72Jz2D68NEtDXkKCKc=
github.com/gpustack/gguf-parser-go v0.24.0/go.mod h1:y4TwTtDqFWTK+xvprOjRUh+dowgU2TKCX37vRKvGiZ0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=
github.com/henvic/httpretty v0.1.4 h1:Jo7uwIRWVFxkqOnErcoYfH90o3ddQyVrSANeS4cxYmU=
github.com/henvic/httpretty v0.1.4/go.mod h1:Dn60sQTZfbt2dYsdUSNsCljyF4AfdqnuJFDLJA1I4AM=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jaypipes/ghw v0.19.1 h1:Lhybk6aadgEJqIxeS0h07UOL/EgMGIdxbAy6V8J7RgY=
github.com/jaypipes/ghw v0.19.1/go.mod h1:GPrvwbtPoxYUenr74+nAnWbardIZq600vJDD5HnPsPE=
github.com/jaypipes/ghw v0.23.0 h1:WOL4hpLcIu1kIm+z5Oz19Tk1HNw/Sncrx/6GS8O0Kl0=
github.com/jaypipes/ghw v0.23.0/go.mod h1:fUNUjMZ0cjahKo+/u+32m9FutIx53Nkbi0Ti0m7j5HY=
github.com/jaypipes/pcidb v1.1.1 h1:QmPhpsbmmnCwZmHeYAATxEaoRuiMAJusKYkUncMC0ro=
github.com/jaypipes/pcidb v1.1.1/go.mod h1:x27LT2krrUgjf875KxQXKB0Ha/YXLdZRVmw6hH0G7g8=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
@@ -141,6 +154,8 @@ github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3J
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c=
github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/kolesnikovae/go-winjob v1.0.0 h1:OKEtCHB3sYNAiqNwGDhf08Y6luM7C8mP+42rp1N6SeE=
github.com/kolesnikovae/go-winjob v1.0.0/go.mod h1:k0joOLP3/NBrRmDQjPV2+oN1TPmEWt6arTNtFjVeQuM=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
@@ -153,6 +168,7 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-runewidth v0.0.20 h1:WcT52H91ZUAwy8+HUkdM3THM6gXqXuLJi9O3rjcQQaQ=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
@@ -172,6 +188,7 @@ github.com/moby/moby/api v1.53.0 h1:PihqG1ncw4W+8mZs69jlwGXdaYBeb5brF6BL7mPIS/w=
github.com/moby/moby/api v1.53.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc=
github.com/moby/moby/client v0.2.1 h1:1Grh1552mvv6i+sYOdY+xKKVTvzJegcVMhuXocyDz/k=
github.com/moby/moby/client v0.2.1/go.mod h1:O+/tw5d4a1Ha/ZA/tPxIZJapJRUS6LNZ1wiVRxYHyUE=
github.com/moby/moby/client v0.2.2 h1:Pt4hRMCAIlyjL3cr8M5TrXCwKzguebPAc2do2ur7dEM=
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
@@ -209,6 +226,8 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.67.1 h1:OTSON1P4DNxzTg4hmKCc37o4ZAZDv0cfXLkOt0oEowI=
github.com/prometheus/common v0.67.1/go.mod h1:RpmT9v35q2Y+lsieQsdOh5sXZ6ajUGC8NjZAmr8vb0Q=
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
@@ -253,22 +272,32 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 h1:NOyNnS19BF2SUDApbOKbDtWZ0IK7b8FJ2uAGdIWOGb0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
@@ -279,6 +308,8 @@ golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU=
golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -295,6 +326,7 @@ golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -316,16 +348,20 @@ golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -334,6 +370,8 @@ golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -344,10 +382,12 @@ google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo=
google.golang.org/genproto/googleapis/api v0.0.0-20251103181224-f26f9409b101 h1:vk5TfqZHNn0obhPIYeS+cxIFKFQgser/M2jnI+9c6MM=
google.golang.org/genproto/googleapis/api v0.0.0-20251103181224-f26f9409b101/go.mod h1:E17fc4PDhkr22dE3RgnH2hEubUaky6ZwW4VhANxyspg=
google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ=
google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=

View File

@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1
ARG GO_VERSION=1.25
ARG GO_VERSION=1.26
FROM scratch AS sitedir

View File

@@ -1,6 +1,6 @@
module github.com/docker/docs/hack/releaser
go 1.25.0
go 1.26.0
require (
github.com/alecthomas/kong v1.4.0

View File

@@ -361,8 +361,8 @@ module:
files: ["*.yaml"]
# Model CLI
- path: github.com/docker/model-runner/cmd/cli
- path: github.com/docker/model-runner
mounts:
- source: docs/reference
- source: cmd/cli/docs/reference
target: data/cli/model
files: ["*.yaml"]