diff --git a/Dockerfile b/Dockerfile index 7fd9846510..ea9311d396 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,81 +2,101 @@ # from the master branch of https://github.com/docker/docker.github.io # # Here is the sequence: -# 1. Set up the build -# 2. Fetch upstream resources -# 3. Build static HTML from master -# 4. Reset to clean tiny nginx image -# 5. Copy Nginx config and archive HTML, which don't change often and can be cached -# 6. Copy static HTML from previous build stage (step 3) +# 1. Set up base stages for building and deploying +# 2. Collect and build the archived documentation +# 3. Collect and build the reference documentation (from upstream resources) +# 4. Build static HTML from the current branch +# 5. Build the final image, combining the archives, reference docs, and +# current version of the documentation # # When the image is run, it starts Nginx and serves the docs at port 4000 -# Get basic configs and Jekyll env -FROM docs/docker.github.io:docs-builder AS builder - -# Set the target again -ENV TARGET=/usr/share/nginx/html - -# Set the source directory to md_source -ENV SOURCE=md_source - -# Get the current docs from the checked out branch -# ${SOURCE} will contain a directory for each archive -COPY . ${SOURCE} - -####### START UPSTREAM RESOURCES ######## -# Set vars used by fetch-upstream-resources.sh script -## Branch to pull from, per ref doc -## To get master from svn the svn branch needs to be 'trunk'. 
To get a branch from svn it needs to be 'branches/branchname' # Engine -ENV ENGINE_SVN_BRANCH="branches/18.09.x" -ENV ENGINE_BRANCH="18.09.x" +ARG ENGINE_BRANCH="18.09.x" # Distribution -ENV DISTRIBUTION_SVN_BRANCH="branches/release/2.6" -ENV DISTRIBUTION_BRANCH="release/2.6" - -# Fetch upstream resources -RUN bash ./${SOURCE}/_scripts/fetch-upstream-resources.sh ${SOURCE} -####### END UPSTREAM RESOURCES ######## +ARG DISTRIBUTION_BRANCH="release/2.6" -# Build the static HTML, now that everything is in place +### +# Set up base stages for building and deploying +### -RUN jekyll build -s ${SOURCE} -d ${TARGET} --config ${SOURCE}/_config.yml +# Get basic configs and Jekyll env +FROM docs/docker.github.io:docs-builder AS builderbase +ENV TARGET=/usr/share/nginx/html +WORKDIR /usr/src/app/md_source/ -# Fix up some links, don't touch the archives -RUN find ${TARGET} -type f -name '*.html' | grep -vE "v[0-9]+\." | while read i; do sed -i 's#href="https://docs.docker.com/#href="/#g' "$i"; done +# Set vars used by fetch-upstream-resources.sh script +# Branch to pull from, per ref doc. To get master from svn the svn branch needs +# to be 'trunk'. To get a branch from svn it needs to be 'branches/branchname' +ARG ENGINE_BRANCH +ENV ENGINE_BRANCH=${ENGINE_BRANCH} +ENV ENGINE_SVN_BRANCH=branches/${ENGINE_BRANCH} + +ARG DISTRIBUTION_BRANCH +ENV DISTRIBUTION_BRANCH=${DISTRIBUTION_BRANCH} +ENV DISTRIBUTION_SVN_BRANCH=branches/${DISTRIBUTION_BRANCH} -# BUILD OF MASTER DOCS IS NOW DONE! 
# Reset to alpine so we don't get any docs source or extra apps -FROM nginx:alpine - -# Set the target again +FROM nginx:alpine AS deploybase ENV TARGET=/usr/share/nginx/html # Get the nginx config from the nginx-onbuild image # This hardly ever changes so should usually be cached COPY --from=docs/docker.github.io:nginx-onbuild /etc/nginx/conf.d/default.conf /etc/nginx/conf.d/default.conf -# Get all the archive static HTML and put it into place -# Go oldest-to-newest to take advantage of the fact that we change older -# archives less often than new ones. -# To add a new archive, add it here -# AND ALSO edit _data/docsarchives/archives.yaml to add it to the drop-down +# Set the default command to serve the static HTML site +CMD echo -e "Docker docs are viewable at:\nhttp://0.0.0.0:4000"; exec nginx -g 'daemon off;' + + +# Build the archived docs +# these docs barely change, so can be cached +FROM deploybase AS archives +# Get all the archive static HTML and put it into place. To add a new archive, +# add it here, and ALSO edit _data/docsarchives/archives.yaml to add it to the drop-down COPY --from=docs/docker.github.io:v17.03 ${TARGET} ${TARGET} COPY --from=docs/docker.github.io:v17.06 ${TARGET} ${TARGET} COPY --from=docs/docker.github.io:v17.09 ${TARGET} ${TARGET} COPY --from=docs/docker.github.io:v17.12 ${TARGET} ${TARGET} COPY --from=docs/docker.github.io:v18.03 ${TARGET} ${TARGET} -# Get the built docs output from the previous build stage -# This ordering means all previous layers can come from cache unless an archive -# changes +# Fetch library samples (documentation from official images on Docker Hub) +# Only add the files that are needed to build these reference docs, so that +# these docs are only rebuilt if changes were made to the configuration. 
+# @todo find a way to build HTML in this stage, and still have them included in the navigation tree +FROM builderbase AS library-samples +COPY ./_scripts/fetch-library-samples.sh ./_scripts/ +COPY ./_samples/boilerplate.txt ./_samples/ +RUN bash ./_scripts/fetch-library-samples.sh -COPY --from=builder ${TARGET} ${TARGET} +# Fetch upstream resources (reference documentation) +# Only add the files that are needed to build these reference docs, so that +# these docs are only rebuilt if changes were made to the configuration. +FROM builderbase AS upstream-resources +COPY ./_scripts/fetch-upstream-resources.sh ./_scripts/ +COPY ./_config.yml . +COPY ./_data/toc.yaml ./_data/ +RUN bash ./_scripts/fetch-upstream-resources.sh . -# Serve the site (target), which is now all static HTML -CMD echo -e "Docker docs are viewable at:\nhttp://0.0.0.0:4000"; exec nginx -g 'daemon off;' + +# Build the current docs from the checked out branch +FROM builderbase AS current +COPY . . +COPY --from=library-samples /usr/src/app/md_source/. ./ +COPY --from=upstream-resources /usr/src/app/md_source/. ./ + +# Build the static HTML, now that everything is in place +RUN jekyll build -d ${TARGET} + +# Fix up some links, don't touch the archives +RUN find ${TARGET} -type f -name '*.html' | grep -vE "v[0-9]+\." | while read i; do sed -i 's#href="https://docs.docker.com/#href="/#g' "$i"; done + + +# Docs with archives (for deploy) +FROM archives AS deploy + +# Add the current version of the docs +COPY --from=current ${TARGET} ${TARGET} diff --git a/Jenkinsfile b/Jenkinsfile index 3b54bd3e61..a03b42843a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -168,4 +168,4 @@ pipeline { } } } -} +} \ No newline at end of file diff --git a/README.md b/README.md index d700366c81..bff722714d 100644 --- a/README.md +++ b/README.md @@ -107,7 +107,7 @@ of [https://docs.docker.com/](https://docs.docker.com/). ## Staging the docs -You have two options: +You have three options: 1. 
On your local machine, clone this repo and run our staging container: @@ -169,7 +169,17 @@ You have two options: running on http://localhost:4000/ by default. To stop it, use `CTRL+C`. You can continue working in a second terminal and Jekyll will rebuild the website incrementally. Refresh the browser to preview your changes. + +3. Build and run a Docker image for your working branch. + + ```bash + $ docker build -t docs/docker.github.io:<branch-name> . + $ docker run --rm -it -p 4000:4000 docs/docker.github.io:<branch-name> + ``` + After the `docker run` command, open the URL provided in the container output in a browser, + http://0.0.0.0:4000, and verify your changes. + ## Read these docs offline To read the docs offline, you can use either a standalone container or a swarm service. diff --git a/_config.yml b/_config.yml index 0711e1a7ee..9c695ca185 100644 --- a/_config.yml +++ b/_config.yml @@ -22,7 +22,9 @@ exclude: ["_scripts", "apidocs/layouts", "Gemfile", "hooks", "index.html", "404. latest_engine_api_version: "1.39" docker_ce_version: "18.09" docker_ee_version: "18.09" -compose_version: "1.23.2" +compose_version: "1.24.0" +compose_file_v3: "3.7" +compose_file_v2: "2.4" machine_version: "0.16.0" distribution_version: "2.6" dtr_version: "2.6" @@ -94,7 +96,7 @@ defaults: - scope: path: "install" values: - win_latest_build: "docker-18.09.2" + win_latest_build: "docker-18.09.6" - scope: path: "datacenter" values: @@ -104,28 +106,28 @@ defaults: values: dtr_org: "docker" dtr_repo: "dtr" - dtr_version: "2.6.2" + dtr_version: "2.6.6" - scope: path: "datacenter/dtr/2.5" values: hide_from_sitemap: true dtr_org: "docker" dtr_repo: "dtr" - dtr_version: "2.5.8" + dtr_version: "2.5.11" - scope: path: "datacenter/dtr/2.4" values: hide_from_sitemap: true dtr_org: "docker" dtr_repo: "dtr" - dtr_version: "2.4.9" + dtr_version: "2.4.12" - scope: path: "datacenter/dtr/2.3" values: hide_from_sitemap: true dtr_org: "docker" dtr_repo: "dtr" - dtr_version: "2.3.10" + dtr_version: 
"2.3.11" - scope: path: "datacenter/dtr/2.2" values: @@ -147,29 +149,29 @@ defaults: values: ucp_org: "docker" ucp_repo: "ucp" - ucp_version: "3.1.2" + ucp_version: "3.1.7" - scope: # This is a bit of a hack for the get-support.md topic. path: "ee" values: ucp_org: "docker" ucp_repo: "ucp" dtr_repo: "dtr" - ucp_version: "3.1.3" - dtr_version: "2.6.2" + ucp_version: "3.1.7" + dtr_version: "2.6.6" - scope: path: "datacenter/ucp/3.0" values: hide_from_sitemap: true ucp_org: "docker" ucp_repo: "ucp" - ucp_version: "3.0.9" + ucp_version: "3.0.11" - scope: path: "datacenter/ucp/2.2" values: hide_from_sitemap: true ucp_org: "docker" ucp_repo: "ucp" - ucp_version: "2.2.16" + ucp_version: "2.2.18" - scope: path: "datacenter/ucp/2.1" values: diff --git a/_config_authoring.yml b/_config_authoring.yml index f95761ac42..ca8a6212a8 100644 --- a/_config_authoring.yml +++ b/_config_authoring.yml @@ -22,7 +22,9 @@ url: https://docs.docker.com latest_engine_api_version: "1.39" docker_ce_version: "18.09" docker_ee_version: "18.09" -compose_version: "1.23.2" +compose_version: "1.24.0" +compose_file_v3: "3.7" +compose_file_v2: "2.4" machine_version: "0.16.0" distribution_version: "2.6" dtr_version: "2.6" diff --git a/_data/ddc_offline_files_2.yaml b/_data/ddc_offline_files_2.yaml index 30078552a1..2003244abc 100644 --- a/_data/ddc_offline_files_2.yaml +++ b/_data/ddc_offline_files_2.yaml @@ -6,6 +6,46 @@ - product: "ucp" version: "3.1" tar-files: + - description: "3.1.7 Linux" + url: https://packages.docker.com/caas/ucp_images_3.1.7.tar.gz + - description: "3.1.7 Windows Server 2016 LTSC" + url: https://packages.docker.com/caas/ucp_images_win_2016_3.1.7.tar.gz + - description: "3.1.7 Windows Server 1709" + url: https://packages.docker.com/caas/ucp_images_win_1709_3.1.7.tar.gz + - description: "3.1.7 Windows Server 1803" + url: https://packages.docker.com/caas/ucp_images_win_1803_3.1.7.tar.gz + - description: "3.1.7 Windows Server 2019 LTSC" + url: 
https://packages.docker.com/caas/ucp_images_win_2019_3.1.7.tar.gz + - description: "3.1.6 Linux" + url: https://packages.docker.com/caas/ucp_images_3.1.6.tar.gz + - description: "3.1.6 Windows Server 2016 LTSC" + url: https://packages.docker.com/caas/ucp_images_win_2016_3.1.6.tar.gz + - description: "3.1.6 Windows Server 1709" + url: https://packages.docker.com/caas/ucp_images_win_1709_3.1.6.tar.gz + - description: "3.1.6 Windows Server 1803" + url: https://packages.docker.com/caas/ucp_images_win_1803_3.1.6.tar.gz + - description: "3.1.6 Windows Server 2019 LTSC" + url: https://packages.docker.com/caas/ucp_images_win_2019_3.1.6.tar.gz + - description: "3.1.5 Linux" + url: https://packages.docker.com/caas/ucp_images_3.1.5.tar.gz + - description: "3.1.5 Windows Server 2016 LTSC" + url: https://packages.docker.com/caas/ucp_images_win_2016_3.1.5.tar.gz + - description: "3.1.5 Windows Server 1709" + url: https://packages.docker.com/caas/ucp_images_win_1709_3.1.5.tar.gz + - description: "3.1.5 Windows Server 1803" + url: https://packages.docker.com/caas/ucp_images_win_1803_3.1.5.tar.gz + - description: "3.1.5 Windows Server 2019 LTSC" + url: https://packages.docker.com/caas/ucp_images_win_2019_3.1.5.tar.gz + - description: "3.1.4 Linux" + url: https://packages.docker.com/caas/ucp_images_3.1.4.tar.gz + - description: "3.1.4 Windows Server 2016 LTSC" + url: https://packages.docker.com/caas/ucp_images_win_2016_3.1.4.tar.gz + - description: "3.1.4 Windows Server 1709" + url: https://packages.docker.com/caas/ucp_images_win_1709_3.1.4.tar.gz + - description: "3.1.4 Windows Server 1803" + url: https://packages.docker.com/caas/ucp_images_win_1803_3.1.4.tar.gz + - description: "3.1.4 Windows Server 2019 LTSC" + url: https://packages.docker.com/caas/ucp_images_win_2019_3.1.4.tar.gz - description: "3.1.3 Linux" url: https://packages.docker.com/caas/ucp_images_3.1.3.tar.gz - description: "3.1.3 Windows Server 2016 LTSC" @@ -43,6 +83,26 @@ - product: "ucp" version: "3.0" tar-files: + 
- description: "3.0.11 Linux" + url: https://packages.docker.com/caas/ucp_images_3.0.11.tar.gz + - description: "3.0.11 IBM Z" + url: https://packages.docker.com/caas/ucp_images_s390x_3.0.11.tar.gz + - description: "3.0.11 Windows Server 2016 LTSC" + url: https://packages.docker.com/caas/ucp_images_win_2016_3.0.11.tar.gz + - description: "3.0.11 Windows Server 1709" + url: https://packages.docker.com/caas/ucp_images_win_1709_3.0.11.tar.gz + - description: "3.0.11 Windows Server 1803" + url: https://packages.docker.com/caas/ucp_images_win_1803_3.0.11.tar.gz + - description: "3.0.10 Linux" + url: https://packages.docker.com/caas/ucp_images_3.0.10.tar.gz + - description: "3.0.10 IBM Z" + url: https://packages.docker.com/caas/ucp_images_s390x_3.0.10.tar.gz + - description: "3.0.10 Windows Server 2016 LTSC" + url: https://packages.docker.com/caas/ucp_images_win_2016_3.0.10.tar.gz + - description: "3.0.10 Windows Server 1709" + url: https://packages.docker.com/caas/ucp_images_win_1709_3.0.10.tar.gz + - description: "3.0.10 Windows Server 1803" + url: https://packages.docker.com/caas/ucp_images_win_1803_3.0.10.tar.gz - description: "3.0.9 Linux" url: https://packages.docker.com/caas/ucp_images_3.0.9.tar.gz - description: "3.0.9 IBM Z" @@ -126,6 +186,18 @@ - product: "ucp" version: "2.2" tar-files: + - description: "2.2.18 Linux" + url: https://packages.docker.com/caas/ucp_images_2.2.18.tar.gz + - description: "2.2.18 IBM Z" + url: https://packages.docker.com/caas/ucp_images_s390x_2.2.18.tar.gz + - description: "2.2.18 Windows" + url: https://packages.docker.com/caas/ucp_images_win_2.2.18.tar.gz + - description: "2.2.17 Linux" + url: https://packages.docker.com/caas/ucp_images_2.2.17.tar.gz + - description: "2.2.17 IBM Z" + url: https://packages.docker.com/caas/ucp_images_s390x_2.2.17.tar.gz + - description: "2.2.17 Windows" + url: https://packages.docker.com/caas/ucp_images_win_2.2.17.tar.gz - description: "2.2.16 Linux" url: 
https://packages.docker.com/caas/ucp_images_2.2.16.tar.gz - description: "2.2.16 IBM Z" @@ -219,6 +291,14 @@ - product: "dtr" version: "2.6" tar-files: + - description: "DTR 2.6.6 Linux x86" + url: https://packages.docker.com/caas/dtr_images_2.6.6.tar.gz + - description: "DTR 2.6.5 Linux x86" + url: https://packages.docker.com/caas/dtr_images_2.6.5.tar.gz + - description: "DTR 2.6.4 Linux x86" + url: https://packages.docker.com/caas/dtr_images_2.6.4.tar.gz + - description: "DTR 2.6.3 Linux x86" + url: https://packages.docker.com/caas/dtr_images_2.6.3.tar.gz - description: "DTR 2.6.2 Linux x86" url: https://packages.docker.com/caas/dtr_images_2.6.2.tar.gz - description: "DTR 2.6.1 Linux x86" @@ -228,6 +308,12 @@ - product: "dtr" version: "2.5" tar-files: + - description: "DTR 2.5.11 Linux x86" + url: https://packages.docker.com/caas/dtr_images_2.5.11.tar.gz + - description: "DTR 2.5.10 Linux x86" + url: https://packages.docker.com/caas/dtr_images_2.5.10.tar.gz + - description: "DTR 2.5.9 Linux x86" + url: https://packages.docker.com/caas/dtr_images_2.5.9.tar.gz - description: "DTR 2.5.8 Linux x86" url: https://packages.docker.com/caas/dtr_images_2.5.8.tar.gz - description: "DTR 2.5.7 Linux x86" @@ -247,6 +333,12 @@ - product: "dtr" version: "2.4" tar-files: + - description: "DTR 2.4.12 Linux x86" + url: https://packages.docker.com/caas/dtr_images_2.4.12.tar.gz + - description: "DTR 2.4.11 Linux x86" + url: https://packages.docker.com/caas/dtr_images_2.4.11.tar.gz + - description: "DTR 2.4.10 Linux x86" + url: https://packages.docker.com/caas/dtr_images_2.4.10.tar.gz - description: "DTR 2.4.9 Linux x86" url: https://packages.docker.com/caas/dtr_images_2.4.9.tar.gz - description: "DTR 2.4.8 Linux x86" @@ -284,6 +376,8 @@ - product: "dtr" version: "2.3" tar-files: + - description: "DTR 2.3.11" + url: https://packages.docker.com/caas/dtr_images_2.3.11.tar.gz - description: "DTR 2.3.10" url: https://packages.docker.com/caas/dtr_images_2.3.10.tar.gz - description: "DTR 
2.3.9" diff --git a/_data/engine-cli/docker_system_df.yaml b/_data/engine-cli/docker_system_df.yaml index 99954d3d5b..d8462e1b65 100644 --- a/_data/engine-cli/docker_system_df.yaml +++ b/_data/engine-cli/docker_system_df.yaml @@ -35,6 +35,7 @@ examples: |- Images 5 2 16.43 MB 11.63 MB (70%) Containers 2 0 212 B 212 B (100%) Local Volumes 2 1 36 B 0 B (0%) + Build Cache 0 0 0B 0B ``` A more detailed view can be requested using the `-v, --verbose` flag: @@ -62,6 +63,14 @@ examples: |- NAME LINKS SIZE 07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e 2 36 B my-named-vol 0 0 B + + Build cache usage: 0B + + + CACHE ID CACHE TYPE SIZE CREATED LAST USED USAGE SHARED + 0d8ab63ff30d regular 4.34MB 7 days ago 0 true + 189876ac9226 regular 11.5MB 7 days ago 0 true + ``` * `SHARED SIZE` is the amount of space that an image shares with another one (i.e. their common data) diff --git a/_data/toc.yaml b/_data/toc.yaml index 8cd4db25f0..1e66542e3d 100644 --- a/_data/toc.yaml +++ b/_data/toc.yaml @@ -295,6 +295,10 @@ guides: - path: /config/containers/logging/configure/ title: Configuring default drivers nosync: true + - sectiontitle: Work with external tools + section: + - path: /config/thirdparty/ + title: Third-party monitoring tools - path: /config/thirdparty/prometheus/ title: Collect Docker metrics with Prometheus - sectiontitle: Configure containers @@ -536,6 +540,12 @@ reference: title: docker attach - path: /engine/reference/commandline/build/ title: docker build + - sectiontitle: docker builder * + section: + - path: /engine/reference/commandline/builder/ + title: docker builder + - path: /engine/reference/commandline/builder_prune/ + title: docker builder prune - sectiontitle: docker checkpoint * section: - path: /engine/reference/commandline/checkpoint/ @@ -1238,7 +1248,7 @@ manuals: title: Using UCP cluster metrics with Prometheus - path: /ee/ucp/admin/configure/configure-rbac-kube/ title: Configure native Kubernetes role-based access control - - path: 
/ee/ucp/admin/configure/create-audit-logs/ + - path: /ee/ucp/admin/configure/enable-audit-logging/ title: Create UCP audit logs - path: /ee/ucp/admin/configure/enable-saml-authentication/ title: Enable SAML authentication @@ -1276,8 +1286,6 @@ manuals: title: Join Windows worker nodes to your cluster - path: /ee/ucp/admin/configure/join-nodes/use-a-load-balancer/ title: Use a load balancer - - path: /ee/ucp/admin/configure/integrate-with-multiple-registries/ - title: Integrate with multiple registries - path: /ee/ucp/admin/configure/deploy-route-reflectors/ title: Improve network performance with Route Reflectors - sectiontitle: Monitor and troubleshoot @@ -1323,8 +1331,6 @@ manuals: title: Isolate nodes - path: /ee/ucp/authorization/pull-images/ title: Allow users to pull images - - path: /ee/ucp/authorization/migrate-kubernetes-roles/ - title: Migrate Kubernetes roles to Docker EE authorization - path: /ee/ucp/authorization/ee-standard/ title: Docker EE Standard use case - path: /ee/ucp/authorization/ee-advanced/ @@ -1364,15 +1370,22 @@ manuals: - title: Offline installation path: /ee/ucp/interlock/deploy/offline-install/ - title: Layer 7 routing upgrade +<<<<<<< HEAD + path: /ee/ucp/interlock/deploy/upgrade/ +======= path: /ee/ucp/interlock/upgrade/ +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 - sectiontitle: Configuration section: - title: Configure your deployment path: /ee/ucp/interlock/config/ +<<<<<<< HEAD +======= - title: Using a custom extension template path: /ee/ucp/interlock/config/custom-template/ - title: Configuring an HAProxy extension path: /ee/ucp/interlock/config/haproxy-config/ +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 - title: Configuring host mode networking path: /ee/ucp/interlock/config/host-mode-networking/ - title: Configuring an nginx extension @@ -1391,32 +1404,42 @@ manuals: path: /ee/ucp/interlock/usage/canary/ - title: Using context or path-based routing path: /ee/ucp/interlock/usage/context/ +<<<<<<< HEAD + - title: 
Specifying a routing mode + path: /ee/ucp/interlock/usage/interlock-vip-mode/ + - title: Using routing labels + path: /ee/ucp/interlock/usage/labels-reference/ +======= - title: Publishing a default host service path: /ee/ucp/interlock/usage/default-backend/ - title: Specifying a routing mode path: /ee/ucp/interlock/usage/interlock-vip-mode/ - title: Using routing labels path: /ee/ucp/interlock/usage/labels-reference.md/ +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 - title: Implementing redirects path: /ee/ucp/interlock/usage/redirects/ - title: Implementing a service cluster path: /ee/ucp/interlock/usage/service-clusters/ - title: Implementing persistent (sticky) sessions path: /ee/ucp/interlock/usage/sessions/ +<<<<<<< HEAD + - title: Securing services with TLS + path: /ee/ucp/interlock/usage/tls/ + - title: Configuring websockets + path: /ee/ucp/interlock/usage/websockets/ +======= - title: Implementing SSL path: /ee/ucp/interlock/usage/ssl/ - title: Securing services with TLS path: /ee/ucp/interlock/usage/tls/ - title: Configuring websockets path: /ee/ucp/interlock/usage/websockets/ +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 - sectiontitle: Deploy apps with Kubernetes section: - title: Access Kubernetes Resources path: /ee/ucp/kubernetes/kube-resources/ - - title: Use NFS persistent storage - path: /ee/ucp/admin/configure/use-nfs-volumes/ - - title: Configure AWS EBS Storage for Kubernetes - path: /ee/ucp/kubernetes/configure-aws-storage/ - title: Deploy a workload path: /ee/ucp/kubernetes/ - title: Deploy a Compose-based app @@ -1429,6 +1452,12 @@ manuals: path: /ee/ucp/kubernetes/install-cni-plugin/ - title: Kubernetes network encryption path: /ee/ucp/kubernetes/kubernetes-network-encryption/ + - sectiontitle: Persistent Storage + section: + - title: Use NFS storage + path: /ee/ucp/kubernetes/storage/use-nfs-volumes/ + - title: Use AWS EBS Storage + path: /ee/ucp/kubernetes/storage/configure-aws-storage/ - title: API reference path: 
/reference/ucp/3.1/api/ nosync: true @@ -1454,6 +1483,8 @@ manuals: title: Install - path: /datacenter/ucp/3.0/guides/admin/install/install-offline/ title: Install offline + - path: /datacenter/ucp/3.0/guides/admin/install/install-on-azure/ + title: Install on Azure - path: /datacenter/ucp/3.0/guides/admin/install/upgrade/ title: Upgrade - path: /datacenter/ucp/3.0/guides/admin/install/upgrade-offline/ @@ -1504,7 +1535,7 @@ manuals: section: - path: /datacenter/ucp/3.0/guides/admin/monitor-and-troubleshoot/ title: Monitor the cluster status - - path: /datacenter/ucp/3.0/admin/monitor-and-troubleshoot/troubleshoot-node-messages/ + - path: /datacenter/ucp/3.0/guides/admin/monitor-and-troubleshoot/troubleshoot-node-messages/ title: Troubleshoot node messages - path: /datacenter/ucp/3.0/guides/admin/monitor-and-troubleshoot/troubleshoot-with-logs/ title: Troubleshoot with logs @@ -1576,27 +1607,75 @@ manuals: title: Web-based access - path: /datacenter/ucp/3.0/guides/user/access-ucp/cli-based-access/ title: CLI-based access - - sectiontitle: Deploy an application + - path: /datacenter/ucp/3.0/guides/user/access-ucp/kubectl/ + title: Install the Kubernetes CLI + - sectiontitle: Deploy apps with Swarm section: - - path: /datacenter/ucp/3.0/guides/user/services/deploy-a-service/ - title: Deploy a service - - path: /datacenter/ucp/3.0/guides/user/services/use-domain-names-to-access-services/ - title: Use domain names to access services - - path: /datacenter/ucp/3.0/guides/user/services/ - title: Deploy an app from the UI - - path: /datacenter/ucp/3.0/guides/user/services/deploy-app-cli/ - title: Deploy an app from the CLI - - path: /datacenter/ucp/3.0/guides/user/services/deploy-stack-to-collection/ + - path: /datacenter/ucp/3.0/guides/user/swarm/ + title: Deploy a single service + - path: /datacenter/ucp/3.0/guides/user/swarm/deploy-multi-service-app/ + title: Deploy a multi-service app + - path: /datacenter/ucp/3.0/guides/user/swarm/deploy-to-collection/ title: Deploy 
application resources to a collection - - sectiontitle: Secrets + - path: /datacenter/ucp/3.0/guides/user/swarm/use-secrets/ + title: Use secrets in your services + - sectiontitle: Layer 7 routing + section: + - path: /datacenter/ucp/3.0/guides/user/interlock/ + title: Overview + - path: /datacenter/ucp/3.0/guides/user/interlock/architecture/ + title: Architecture + - sectiontitle: Deploy + section: + - title: Simple deployment + path: /datacenter/ucp/3.0/guides/user/interlock/deploy/ + - title: Configure your deployment + path: /datacenter/ucp/3.0/guides/user/interlock/deploy/configure/ + - title: Production deployment + path: /datacenter/ucp/3.0/guides/user/interlock/deploy/production/ + - title: Host mode networking + path: /datacenter/ucp/3.0/guides/user/interlock/deploy/host-mode-networking/ + - title: Configuration reference + path: /datacenter/ucp/3.0/guides/user/interlock/deploy/configuration-reference/ + - sectiontitle: Route traffic to services + section: + - title: Simple swarm service + path: /datacenter/ucp/3.0/guides/user/interlock/usage/ + - title: Set a default service + path: /datacenter/ucp/3.0/guides/user/interlock/usage/default-service/ + - title: Applications with TLS + path: /datacenter/ucp/3.0/guides/user/interlock/usage/tls/ + - title: Application redirects + path: /datacenter/ucp/3.0/guides/user/interlock/usage/redirects/ + - title: Persistent (sticky) sessions + path: /datacenter/ucp/3.0/guides/user/interlock/usage/sessions/ + - title: Websockets + path: /datacenter/ucp/3.0/guides/user/interlock/usage/websockets/ + - title: Canary application instances + path: /datacenter/ucp/3.0/guides/user/interlock/usage/canary/ + - title: Service clusters + path: /datacenter/ucp/3.0/guides/user/interlock/usage/service-clusters/ + - title: Context/Path based routing + path: /datacenter/ucp/3.0/guides/user/interlock/usage/context/ + - title: Service labels reference + path: /datacenter/ucp/3.0/guides/user/interlock/usage/labels-reference/ + - title: 
Layer 7 routing upgrade + path: /datacenter/ucp/3.0/guides/user/interlock/upgrade/ + - sectiontitle: Deploy apps with Kubernetes section: - - path: /datacenter/ucp/3.0/guides/user/secrets/ - title: Manage secrets - - path: /datacenter/ucp/3.0/guides/user/secrets/grant-revoke-access/ - title: Grant access to secrets + - title: Deploy a workload + path: /datacenter/ucp/3.0/guides/user/kubernetes/ + - title: Deploy a Compose-based app + path: /datacenter/ucp/3.0/guides/user/kubernetes/deploy-with-compose/ + - title: Deploy an ingress controller + path: /datacenter/ucp/3.0/guides/user/kubernetes/layer-7-routing/ + - title: Create a service account for a Kubernetes app + path: /datacenter/ucp/3.0/guides/user/kubernetes/create-service-account/ + - title: Install a CNI plugin + path: /datacenter/ucp/3.0/guides/user/kubernetes/install-cni-plugin/ - path: /datacenter/ucp/3.0/reference/api/ title: API reference - - path: /ee/ucp/release-notes/ + - path: /ee/ucp/release-notes/#version-30 title: Release notes nosync: true - path: /datacenter/ucp/3.0/guides/get-support/ @@ -1647,6 +1726,8 @@ manuals: title: Restrict services to worker nodes - path: /datacenter/ucp/2.2/guides/admin/configure/run-only-the-images-you-trust/ title: Run only the images you trust + - path: /datacenter/ucp/2.2/guides/admin/configure/use-trusted-images-for-ci/ + title: Use trusted images for continuous integration - path: /datacenter/ucp/2.2/guides/admin/configure/scale-your-cluster/ title: Scale your cluster - path: /datacenter/ucp/2.2/guides/admin/configure/set-session-timeout/ @@ -1761,7 +1842,7 @@ manuals: title: Grant access to secrets - path: /datacenter/ucp/2.2/reference/api/ title: API reference - - path: /ee/ucp/release-notes/ + - path: /ee/ucp/release-notes/#version-22 title: Release notes nosync: true - path: /datacenter/ucp/2.2/guides/get-support/ @@ -1812,6 +1893,8 @@ manuals: title: Use domain names to access services - path: 
/datacenter/ucp/2.1/guides/admin/configure/run-only-the-images-you-trust/ title: Run only the images you trust + - path: /datacenter/ucp/2.1/guides/admin/configure/use-trusted-images-for-ci/ + title: Use trusted images for continuous integration - path: /datacenter/ucp/2.1/guides/admin/configure/integrate-with-dtr/ title: Integrate with Docker Trusted Registry - path: /datacenter/ucp/2.1/guides/admin/configure/external-auth/ @@ -2162,6 +2245,8 @@ manuals: section: - path: /ee/dtr/admin/configure/external-storage/ title: Overview + - path: /ee/dtr/admin/configure/external-storage/storage-backend-migration/ + title: Switch storage backends - path: /ee/dtr/admin/configure/external-storage/s3/ title: S3 - path: /ee/dtr/admin/configure/external-storage/nfs/ @@ -2267,10 +2352,8 @@ manuals: section: - path: /ee/dtr/user/manage-images/sign-images/ title: Sign an image - - path: /ee/dtr/user/manage-images/sign-images/delegate-image-signing/ - title: Delegate image signing - - path: /ee/dtr/user/manage-images/sign-images/manage-trusted-repositories/ - title: Manage trusted repositories + - path: /ee/dtr/user/manage-images/sign-images/trust-with-remote-ucp/ + title: Trust with a Remote UCP - sectiontitle: Promotion policies and mirroring section: - title: Overview @@ -3359,7 +3442,7 @@ manuals: - path: /docker-hub/slack_integration/ title: Slack Integration - path: /docker-hub/upgrade/ - title: Upgrading your plan + title: Upgrade your plan - sectiontitle: Automated Builds section: - path: /docker-hub/builds/ @@ -3521,28 +3604,6 @@ manuals: nosync: true - sectiontitle: Superseded products and tools section: - - sectiontitle: Commercially supported Docker Engine - section: - - sectiontitle: 1.13 - section: - - path: /cs-engine/1.13/ - title: Install - - path: /cs-engine/1.13/upgrade/ - title: Upgrade - - path: /cs-engine/1.13/release-notes/ - title: Release notes - - sectiontitle: 1.12 - section: - - path: /cs-engine/1.12/ - title: Install - - path: /cs-engine/1.12/upgrade/ - 
title: Upgrade - - sectiontitle: Release notes - section: - - path: /cs-engine/1.12/release-notes/release-notes/ - title: CS Engine release notes - - path: /cs-engine/1.12/release-notes/prior-release-notes/ - title: Prior CS Engine release notes - sectiontitle: Docker Swarm (standalone) section: - path: /swarm/overview/ diff --git a/_includes/content/compose-extfields-sub.md b/_includes/content/compose-extfields-sub.md index c6af94c088..b415632fda 100644 --- a/_includes/content/compose-extfields-sub.md +++ b/_includes/content/compose-extfields-sub.md @@ -8,7 +8,7 @@ your Compose file and their name start with the `x-` character sequence. > (for the 2.x series), extension fields are also allowed at the root > of service, volume, network, config and secret definitions. -```none +```yaml version: '3.4' x-custom: items: @@ -24,7 +24,7 @@ inserted in your resource definitions using [YAML anchors](http://www.yaml.org/s For example, if you want several of your services to use the same logging configuration: -```none +```yaml logging: options: max-size: '12m' @@ -34,7 +34,7 @@ logging: You may write your Compose file as follows: -```none +```yaml version: '3.4' x-logging: &default-logging @@ -55,7 +55,7 @@ services: It is also possible to partially override values in extension fields using the [YAML merge type](http://yaml.org/type/merge.html). For example: -```none +```yaml version: '3.4' x-volumes: &default-volume diff --git a/_includes/content/compose-var-sub.md b/_includes/content/compose-var-sub.md index 54a780c114..4c333daab5 100644 --- a/_includes/content/compose-var-sub.md +++ b/_includes/content/compose-var-sub.md @@ -3,8 +3,10 @@ variable values from the shell environment in which `docker-compose` is run. 
For example, suppose the shell contains `POSTGRES_VERSION=9.3` and you supply this configuration: - db: - image: "postgres:${POSTGRES_VERSION}" +```yaml +db: + image: "postgres:${POSTGRES_VERSION}" +``` When you run `docker-compose up` with this configuration, Compose looks for the `POSTGRES_VERSION` environment variable in the shell and substitutes its value @@ -47,9 +49,11 @@ dollar sign. This also prevents Compose from interpolating a value, so a `$$` allows you to refer to environment variables that you don't want processed by Compose. - web: - build: . - command: "$$VAR_NOT_INTERPOLATED_BY_COMPOSE" +```yaml +web: + build: . + command: "$$VAR_NOT_INTERPOLATED_BY_COMPOSE" +``` If you forget and use a single dollar sign (`$`), Compose interprets the value as an environment variable and warns you: diff --git a/_includes/footer.html b/_includes/footer.html index b47515ee47..36222b4b05 100644 --- a/_includes/footer.html +++ b/_includes/footer.html @@ -47,6 +47,7 @@
  • Documentation
  • Learn
  • Blog
  • +
  • Engineering Blog
  • Training
  • Support
  • Knowledge Base
  • diff --git a/_includes/kubernetes-mac-win.md b/_includes/kubernetes-mac-win.md index 7cb7e1676e..093a7c81a6 100644 --- a/_includes/kubernetes-mac-win.md +++ b/_includes/kubernetes-mac-win.md @@ -15,7 +15,7 @@ Usage: {% include kubernetes-mac-win.md platform="mac" %} {% capture min-version %}{{ product }} 18.06.0-ce-mac70 CE{% endcapture %} {% capture version-caveat %} - **Kubernetes is only available in {{ min-version }} and higher. + **Kubernetes is only available in {{ min-version }} and higher.** {% endcapture %} {% capture local-kubectl-warning %} @@ -34,7 +34,7 @@ Usage: {% include kubernetes-mac-win.md platform="mac" %} {% capture min-version %}{{ product }} 18.06.0-ce-win70 CE{% endcapture %} {% capture version-caveat %} - **Kubernetes is only available in {{ min-version }} and higher. + **Kubernetes is only available in {{ min-version }} and higher.** {% endcapture %} {% capture local-kubectl-warning %} diff --git a/_scripts/fetch-library-samples.sh b/_scripts/fetch-library-samples.sh new file mode 100755 index 0000000000..e8b1d2c6ec --- /dev/null +++ b/_scripts/fetch-library-samples.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +# Get the Library docs +svn co https://github.com/docker-library/docs/trunk ./_samples/library || (echo "Failed library download" && exit 1) +# Remove symlinks to maintainer.md because they break jekyll and we don't use em +find ./_samples/library -maxdepth 9 -type l -delete +# Loop through the README.md files, turn them into rich index.md files +FILES=$(find ./_samples/library -type f -name 'README.md') +for f in ${FILES} +do + curdir=$(dirname "${f}") + justcurdir="${curdir##*/}" + if [ -e ${curdir}/README-short.txt ] + then + # shortrm=$(<${curdir}/README-short.txt) + shortrm=$(cat ${curdir}/README-short.txt) + fi + echo "Adding front-matter to ${f} ..." 
+ echo --- >> ${curdir}/front-matter.txt + echo title: "${justcurdir}" >> ${curdir}/front-matter.txt + echo keywords: library, sample, ${justcurdir} >> ${curdir}/front-matter.txt + echo repo: "${justcurdir}" >> ${curdir}/front-matter.txt + echo layout: docs >> ${curdir}/front-matter.txt + echo permalink: /samples/library/${justcurdir}/ >> ${curdir}/front-matter.txt + echo redirect_from: >> ${curdir}/front-matter.txt + echo - /samples/${justcurdir}/ >> ${curdir}/front-matter.txt + echo description: \| >> ${curdir}/front-matter.txt + echo \ \ ${shortrm} >> ${curdir}/front-matter.txt + echo --- >> ${curdir}/front-matter.txt + echo >> ${curdir}/front-matter.txt + echo ${shortrm} >> ${curdir}/front-matter.txt + echo >> ${curdir}/front-matter.txt + if [ -e ${curdir}/github-repo ] + then + # gitrepo=$(<${curdir}/github-repo) + gitrepo=$(cat ${curdir}/github-repo) + echo >> ${curdir}/front-matter.txt + echo GitHub repo: \["${gitrepo}"\]\("${gitrepo}"\)\{: target="_blank"\} >> ${curdir}/front-matter.txt + echo >> ${curdir}/front-matter.txt + fi + cat ${curdir}/front-matter.txt ./_samples/boilerplate.txt > ${curdir}/header.txt + echo {% raw %} >> ${curdir}/header.txt + cat ${curdir}/header.txt ${curdir}/README.md > ${curdir}/index.md + echo {% endraw %} >> ${curdir}/index.md + rm -rf ${curdir}/front-matter.txt + rm -rf ${curdir}/header.txt +done + +rm ./_samples/library/index.md diff --git a/_scripts/fetch-upstream-resources.sh b/_scripts/fetch-upstream-resources.sh index 3058266fc5..81ffff7e4b 100755 --- a/_scripts/fetch-upstream-resources.sh +++ b/_scripts/fetch-upstream-resources.sh @@ -29,28 +29,11 @@ while getopts ":hl" opt; do done # Do some sanity-checking to make sure we are running this from the right place -if [ $LOCAL -eq 1 ]; then - SOURCE="." - if ! [ -f _config.yml ]; then - echo "Could not find _config.yml. We may not be in the right place. Bailing." - exit 1 - fi -else - SOURCE="md_source" - if ! 
[ -d md_source ]; then - echo "Could not find md_source directory. We may not be running in the right place. Bailing." - exit 1 - fi +if ! [ -f _config.yml ]; then + echo "Could not find _config.yml. We may not be in the right place. Bailing." + exit 1 fi -# Reasonable default to find the Markdown files -if [ -z "$SOURCE" ]; then - echo "No source passed in, assuming md_source/..." - SOURCE="md_source" -fi - -echo "Operating on contents of $SOURCE" - # Parse some variables from _config.yml and make them available to this script # This only finds top-level variables with _version in them that don't have any # leading space. This is brittle! @@ -61,10 +44,10 @@ while read i; do varvalue=$(echo "$i" | sed 's/"//g' | awk -F ':' {'print $2'} | tr -d '[:space:]') echo "Setting \$${varname} to $varvalue" declare "$varname=$varvalue" -done < <(cat ${SOURCE}/_config.yml |grep '_version:' |grep '^[a-z].*') +done < <(cat ./_config.yml |grep '_version:' |grep '^[a-z].*') # Replace variable in toc.yml with value from above -sedi "s/{{ site.latest_engine_api_version }}/$latest_engine_api_version/g" ${SOURCE}/_data/toc.yaml +sedi "s/{{ site.latest_engine_api_version }}/$latest_engine_api_version/g" ./_data/toc.yaml # Engine stable ENGINE_SVN_BRANCH="branches/18.09" @@ -75,94 +58,42 @@ DISTRIBUTION_SVN_BRANCH="branches/release/2.6" DISTRIBUTION_BRANCH="release/2.6" # Directories to get via SVN. 
We use this because you can't use git to clone just a portion of a repository -svn co https://github.com/docker/docker-ce/"$ENGINE_SVN_BRANCH"/components/cli/docs/extend ${SOURCE}/engine/extend || (echo "Failed engine/extend download" && exit -1) -svn co https://github.com/docker/docker-ce/"$ENGINE_SVN_BRANCH"/components/engine/docs/api ${SOURCE}/engine/api || (echo "Failed engine/api download" && exit -1) # This will only get you the old API MD files 1.18 through 1.24 -svn co https://github.com/docker/distribution/"$DISTRIBUTION_SVN_BRANCH"/docs/spec ${SOURCE}/registry/spec || (echo "Failed registry/spec download" && exit -1) -svn co https://github.com/docker/compliance/trunk/docs/compliance ${SOURCE}/compliance || (echo "Failed docker/compliance download" && exit -1) - -# Get the Library docs -svn co https://github.com/docker-library/docs/trunk ${SOURCE}/_samples/library || (echo "Failed library download" && exit -1) -# Remove symlinks to maintainer.md because they break jekyll and we don't use em -find ${SOURCE}/_samples/library -maxdepth 9 -type l -delete -# Loop through the README.md files, turn them into rich index.md files -FILES=$(find ${SOURCE}/_samples/library -type f -name 'README.md') -for f in $FILES -do - curdir=$(dirname "${f}") - justcurdir="${curdir##*/}" - if [ -e ${curdir}/README-short.txt ] - then - # shortrm=$(<${curdir}/README-short.txt) - shortrm=$(cat ${curdir}/README-short.txt) - fi - echo "Adding front-matter to ${f} ..." 
- echo --- >> ${curdir}/front-matter.txt - echo title: "${justcurdir}" >> ${curdir}/front-matter.txt - echo keywords: library, sample, ${justcurdir} >> ${curdir}/front-matter.txt - echo repo: "${justcurdir}" >> ${curdir}/front-matter.txt - echo layout: docs >> ${curdir}/front-matter.txt - echo permalink: /samples/library/${justcurdir}/ >> ${curdir}/front-matter.txt - echo redirect_from: >> ${curdir}/front-matter.txt - echo - /samples/${justcurdir}/ >> ${curdir}/front-matter.txt - echo description: \| >> ${curdir}/front-matter.txt - echo \ \ ${shortrm} >> ${curdir}/front-matter.txt - echo --- >> ${curdir}/front-matter.txt - echo >> ${curdir}/front-matter.txt - echo ${shortrm} >> ${curdir}/front-matter.txt - echo >> ${curdir}/front-matter.txt - if [ -e ${curdir}/github-repo ] - then - # gitrepo=$(<${curdir}/github-repo) - gitrepo=$(cat ${curdir}/github-repo) - echo >> ${curdir}/front-matter.txt - echo GitHub repo: \["${gitrepo}"\]\("${gitrepo}"\)\{: target="_blank"\} >> ${curdir}/front-matter.txt - echo >> ${curdir}/front-matter.txt - fi - cat ${curdir}/front-matter.txt ${SOURCE}/_samples/boilerplate.txt > ${curdir}/header.txt - echo {% raw %} >> ${curdir}/header.txt - cat ${curdir}/header.txt ${curdir}/README.md > ${curdir}/index.md - echo {% endraw %} >> ${curdir}/index.md - rm -rf ${curdir}/front-matter.txt - rm -rf ${curdir}/header.txt -done +svn co https://github.com/docker/docker-ce/"$ENGINE_SVN_BRANCH"/components/cli/docs/extend ./engine/extend || (echo "Failed engine/extend download" && exit 1) +svn co https://github.com/docker/docker-ce/"$ENGINE_SVN_BRANCH"/components/engine/docs/api ./engine/api || (echo "Failed engine/api download" && exit 1) # This will only get you the old API MD files 1.18 through 1.24 +svn co https://github.com/docker/distribution/"$DISTRIBUTION_SVN_BRANCH"/docs/spec ./registry/spec || (echo "Failed registry/spec download" && exit 1) +svn co https://github.com/docker/compliance/trunk/docs/compliance ./compliance || (echo "Failed 
docker/compliance download" && exit 1) # Get the Engine APIs that are in Swagger # Be careful with the locations on Github for these -wget -O ${SOURCE}/engine/api/v1.25/swagger.yaml https://raw.githubusercontent.com/docker/docker/v1.13.0/api/swagger.yaml || (echo "Failed 1.25 swagger download" && exit -1) -wget -O ${SOURCE}/engine/api/v1.26/swagger.yaml https://raw.githubusercontent.com/docker/docker/v17.03.0-ce/api/swagger.yaml || (echo "Failed 1.26 swagger download" && exit -1) -wget -O ${SOURCE}/engine/api/v1.27/swagger.yaml https://raw.githubusercontent.com/docker/docker/v17.03.1-ce/api/swagger.yaml || (echo "Failed 1.27 swagger download" && exit -1) - -# Get the Edge API Swagger # When you change this you need to make sure to copy the previous # directory into a new one in the docs git and change the index.html -wget -O ${SOURCE}/engine/api/v1.28/swagger.yaml https://raw.githubusercontent.com/docker/docker/v17.04.0-ce/api/swagger.yaml || (echo "Failed 1.28 swagger download or the 1.28 directory doesn't exist" && exit -1) -wget -O ${SOURCE}/engine/api/v1.29/swagger.yaml https://raw.githubusercontent.com/docker/docker/17.05.x/api/swagger.yaml || (echo "Failed 1.29 swagger download or the 1.29 directory doesn't exist" && exit -1) +wget --quiet --directory-prefix=./engine/api/v1.25/ https://raw.githubusercontent.com/docker/docker/v1.13.0/api/swagger.yaml || (echo "Failed 1.25 swagger download" && exit 1) +wget --quiet --directory-prefix=./engine/api/v1.26/ https://raw.githubusercontent.com/docker/docker/v17.03.0-ce/api/swagger.yaml || (echo "Failed 1.26 swagger download" && exit 1) +wget --quiet --directory-prefix=./engine/api/v1.27/ https://raw.githubusercontent.com/docker/docker/v17.03.1-ce/api/swagger.yaml || (echo "Failed 1.27 swagger download" && exit 1) +wget --quiet --directory-prefix=./engine/api/v1.28/ https://raw.githubusercontent.com/docker/docker/v17.04.0-ce/api/swagger.yaml || (echo "Failed 1.28 swagger download" && exit 1) +wget --quiet 
--directory-prefix=./engine/api/v1.29/ https://raw.githubusercontent.com/docker/docker/17.05.x/api/swagger.yaml || (echo "Failed 1.29 swagger download" && exit 1) # New location for swagger.yaml for 17.06+ -wget -O ${SOURCE}/engine/api/v1.30/swagger.yaml https://raw.githubusercontent.com/docker/docker-ce/17.06/components/engine/api/swagger.yaml || (echo "Failed 1.30 swagger download or the 1.30 directory doesn't exist" && exit -1) -wget -O ${SOURCE}/engine/api/v1.31/swagger.yaml https://raw.githubusercontent.com/docker/docker-ce/17.07/components/engine/api/swagger.yaml || (echo "Failed 1.31 swagger download or the 1.31 directory doesn't exist" && exit -1) -wget -O ${SOURCE}/engine/api/v1.32/swagger.yaml https://raw.githubusercontent.com/docker/docker-ce/17.09/components/engine/api/swagger.yaml || (echo "Failed 1.32 swagger download or the 1.32 directory doesn't exist" && exit -1) -wget -O ${SOURCE}/engine/api/v1.33/swagger.yaml https://raw.githubusercontent.com/docker/docker-ce/17.10/components/engine/api/swagger.yaml || (echo "Failed 1.33 swagger download or the 1.33 directory doesn't exist" && exit -1) -wget -O ${SOURCE}/engine/api/v1.34/swagger.yaml https://raw.githubusercontent.com/docker/docker-ce/17.11/components/engine/api/swagger.yaml || (echo "Failed 1.34 swagger download or the 1.34 directory doesn't exist" && exit -1) -wget -O ${SOURCE}/engine/api/v1.35/swagger.yaml https://raw.githubusercontent.com/docker/docker-ce/17.12/components/engine/api/swagger.yaml || (echo "Failed 1.35 swagger download or the 1.35 directory doesn't exist" && exit -1) -wget -O ${SOURCE}/engine/api/v1.36/swagger.yaml https://raw.githubusercontent.com/docker/docker-ce/18.02/components/engine/api/swagger.yaml || (echo "Failed 1.36 swagger download or the 1.36 directory doesn't exist" && exit -1) -wget -O ${SOURCE}/engine/api/v1.37/swagger.yaml https://raw.githubusercontent.com/docker/docker-ce/18.03/components/engine/api/swagger.yaml || (echo "Failed 1.37 swagger download or the 
1.37 directory doesn't exist" && exit -1) -wget -O ${SOURCE}/engine/api/v1.38/swagger.yaml https://raw.githubusercontent.com/docker/docker-ce/18.06/components/engine/api/swagger.yaml || (echo "Failed 1.38 swagger download or the 1.38 directory doesn't exist" && exit -1) -wget -O ${SOURCE}/engine/api/v1.39/swagger.yaml https://raw.githubusercontent.com/docker/docker-ce/18.09/components/engine/api/swagger.yaml || (echo "Failed 1.39 swagger download or the 1.39 directory doesn't exist" && exit -1) +wget --quiet --directory-prefix=./engine/api/v1.30/ https://raw.githubusercontent.com/docker/docker-ce/17.06/components/engine/api/swagger.yaml || (echo "Failed 1.30 swagger download" && exit 1) +wget --quiet --directory-prefix=./engine/api/v1.31/ https://raw.githubusercontent.com/docker/docker-ce/17.07/components/engine/api/swagger.yaml || (echo "Failed 1.31 swagger download" && exit 1) +wget --quiet --directory-prefix=./engine/api/v1.32/ https://raw.githubusercontent.com/docker/docker-ce/17.09/components/engine/api/swagger.yaml || (echo "Failed 1.32 swagger download" && exit 1) +wget --quiet --directory-prefix=./engine/api/v1.33/ https://raw.githubusercontent.com/docker/docker-ce/17.10/components/engine/api/swagger.yaml || (echo "Failed 1.33 swagger download" && exit 1) +wget --quiet --directory-prefix=./engine/api/v1.34/ https://raw.githubusercontent.com/docker/docker-ce/17.11/components/engine/api/swagger.yaml || (echo "Failed 1.34 swagger download" && exit 1) +wget --quiet --directory-prefix=./engine/api/v1.35/ https://raw.githubusercontent.com/docker/docker-ce/17.12/components/engine/api/swagger.yaml || (echo "Failed 1.35 swagger download" && exit 1) +wget --quiet --directory-prefix=./engine/api/v1.36/ https://raw.githubusercontent.com/docker/docker-ce/18.02/components/engine/api/swagger.yaml || (echo "Failed 1.36 swagger download" && exit 1) +wget --quiet --directory-prefix=./engine/api/v1.37/ 
https://raw.githubusercontent.com/docker/docker-ce/18.03/components/engine/api/swagger.yaml || (echo "Failed 1.37 swagger download" && exit 1) +wget --quiet --directory-prefix=./engine/api/v1.38/ https://raw.githubusercontent.com/docker/docker-ce/18.06/components/engine/api/swagger.yaml || (echo "Failed 1.38 swagger download" && exit 1) +wget --quiet --directory-prefix=./engine/api/v1.39/ https://raw.githubusercontent.com/docker/docker-ce/18.09/components/engine/api/swagger.yaml || (echo "Failed 1.39 swagger download" && exit 1) -# Get dockerd.md from upstream -wget -O ${SOURCE}/engine/reference/commandline/dockerd.md https://raw.githubusercontent.com/docker/docker-ce/"$ENGINE_BRANCH"/components/cli/docs/reference/commandline/dockerd.md || (echo "Failed to fetch stable dockerd.md" && exit -1) # Get a few one-off files that we use directly from upstream -wget -O ${SOURCE}/engine/reference/builder.md https://raw.githubusercontent.com/docker/docker-ce/"$ENGINE_BRANCH"/components/cli/docs/reference/builder.md || (echo "Failed engine/reference/builder.md download" && exit -1) -wget -O ${SOURCE}/engine/reference/run.md https://raw.githubusercontent.com/docker/docker-ce/"$ENGINE_BRANCH"/components/cli/docs/reference/run.md || (echo "Failed engine/reference/run.md download" && exit -1) -# Adjust this one when Edge != Stable -wget -O ${SOURCE}/edge/engine/reference/run.md https://raw.githubusercontent.com/docker/docker-ce/"$ENGINE_BRANCH"/components/cli/docs/reference/run.md || (echo "Failed engine/reference/run.md download" && exit -1) -wget -O ${SOURCE}/engine/reference/commandline/cli.md https://raw.githubusercontent.com/docker/docker-ce/"$ENGINE_BRANCH"/components/cli/docs/reference/commandline/cli.md || (echo "Failed engine/reference/commandline/cli.md download" && exit -1) -wget -O ${SOURCE}/engine/deprecated.md https://raw.githubusercontent.com/docker/docker-ce/"$ENGINE_BRANCH"/components/cli/docs/deprecated.md || (echo "Failed engine/deprecated.md download" && exit 
-1) -wget -O ${SOURCE}/registry/configuration.md https://raw.githubusercontent.com/docker/distribution/"$DISTRIBUTION_BRANCH"/docs/configuration.md || (echo "Failed registry/configuration.md download" && exit -1) +wget --quiet --directory-prefix=./engine/ https://raw.githubusercontent.com/docker/docker-ce/"$ENGINE_BRANCH"/components/cli/docs/deprecated.md || (echo "Failed engine/deprecated.md download" && exit 1) +wget --quiet --directory-prefix=./engine/reference/ https://raw.githubusercontent.com/docker/docker-ce/"$ENGINE_BRANCH"/components/cli/docs/reference/builder.md || (echo "Failed engine/reference/builder.md download" && exit 1) +wget --quiet --directory-prefix=./engine/reference/ https://raw.githubusercontent.com/docker/docker-ce/"$ENGINE_BRANCH"/components/cli/docs/reference/run.md || (echo "Failed engine/reference/run.md download" && exit 1) +wget --quiet --directory-prefix=./engine/reference/commandline/ https://raw.githubusercontent.com/docker/docker-ce/"$ENGINE_BRANCH"/components/cli/docs/reference/commandline/cli.md || (echo "Failed engine/reference/commandline/cli.md download" && exit 1) +wget --quiet --directory-prefix=./engine/reference/commandline/ https://raw.githubusercontent.com/docker/docker-ce/"$ENGINE_BRANCH"/components/cli/docs/reference/commandline/dockerd.md || (echo "Failed engine/reference/commandline/dockerd.md download" && exit 1) +wget --quiet --directory-prefix=./registry/ https://raw.githubusercontent.com/docker/distribution/"$DISTRIBUTION_BRANCH"/docs/configuration.md || (echo "Failed registry/configuration.md download" && exit 1) # Remove things we don't want in the build -rm ${SOURCE}/registry/spec/api.md.tmpl -rm -rf ${SOURCE}/apidocs/cloud-api-source -rm -rf ${SOURCE}/tests -rm ${SOURCE}/_samples/library/index.md +rm ./registry/spec/api.md.tmpl +rm -rf ./apidocs/cloud-api-source +rm -rf ./tests diff --git a/compose/completion.md b/compose/completion.md index 91c8ba2cd4..d05b66ee0e 100644 --- a/compose/completion.md +++ 
b/compose/completion.md @@ -17,11 +17,12 @@ Make sure bash completion is installed. 1. On a current Linux OS (in a non-minimal installation), bash completion should be available. + 2. Place the completion script in `/etc/bash_completion.d/`. -```shell -sudo curl -L https://raw.githubusercontent.com/docker/compose/{{site.compose_version}}/contrib/completion/bash/docker-compose -o /etc/bash_completion.d/docker-compose -``` + ```shell + sudo curl -L https://raw.githubusercontent.com/docker/compose/{{site.compose_version}}/contrib/completion/bash/docker-compose -o /etc/bash_completion.d/docker-compose + ``` ### Mac @@ -32,17 +33,17 @@ sudo curl -L https://raw.githubusercontent.com/docker/compose/{{site.compose_ver For example, when running this command on Mac 10.13.2, place the completion script in `/usr/local/etc/bash_completion.d/`. -```shell -sudo curl -L https://raw.githubusercontent.com/docker/compose/{{site.compose_version}}/contrib/completion/bash/docker-compose -o /usr/local/etc/bash_completion.d/docker-compose -``` + ```shell + sudo curl -L https://raw.githubusercontent.com/docker/compose/{{site.compose_version}}/contrib/completion/bash/docker-compose -o /usr/local/etc/bash_completion.d/docker-compose + ``` 3. Add the following to your `~/.bash_profile`: -```shell -if [ -f $(brew --prefix)/etc/bash_completion ]; then + ```shell + if [ -f $(brew --prefix)/etc/bash_completion ]; then . $(brew --prefix)/etc/bash_completion -fi -``` + fi + ``` 4. You can source your `~/.bash_profile` or launch a new terminal to utilize completion. @@ -50,13 +51,14 @@ completion. ##### Install via MacPorts 1. Run `sudo port install bash-completion` to install bash completion. + 2. Add the following lines to `~/.bash_profile`: -```shell -if [ -f /opt/local/etc/profile.d/bash_completion.sh ]; then + ```shell + if [ -f /opt/local/etc/profile.d/bash_completion.sh ]; then . /opt/local/etc/profile.d/bash_completion.sh -fi -``` + fi + ``` 3. 
You can source your `~/.bash_profile` or launch a new terminal to utilize completion. @@ -72,34 +74,34 @@ Add `docker` and `docker-compose` to the plugins list in `~/.zshrc` to run autoc ```shell plugins=(... docker docker-compose ) -``` + ``` #### Without oh-my-zsh shell 1. Place the completion script in your `/path/to/zsh/completion` (typically `~/.zsh/completion/`): -```shell -$ mkdir -p ~/.zsh/completion -$ curl -L https://raw.githubusercontent.com/docker/compose/{{site.compose_version}}/contrib/completion/zsh/_docker-compose > ~/.zsh/completion/_docker-compose -``` + ```shell + $ mkdir -p ~/.zsh/completion + $ curl -L https://raw.githubusercontent.com/docker/compose/{{site.compose_version}}/contrib/completion/zsh/_docker-compose > ~/.zsh/completion/_docker-compose + ``` 2. Include the directory in your `$fpath` by adding in `~/.zshrc`: -```shell -fpath=(~/.zsh/completion $fpath) -``` + ```shell + fpath=(~/.zsh/completion $fpath) + ``` 3. Make sure `compinit` is loaded or do it by adding in `~/.zshrc`: -```shell -autoload -Uz compinit && compinit -i -``` + ```shell + autoload -Uz compinit && compinit -i + ``` 4. Then reload your shell: -```shell -exec $SHELL -l -``` + ```shell + exec $SHELL -l + ``` ## Available completions diff --git a/compose/compose-file/compose-file-v2.md b/compose/compose-file/compose-file-v2.md index f6c356dab2..6ee0c66246 100644 --- a/compose/compose-file/compose-file-v2.md +++ b/compose/compose-file/compose-file-v2.md @@ -50,7 +50,7 @@ definition in version 2. A set of configuration options to set block IO limits for this service. - version: '2.2' + version: "{{ site.compose_file_v2 }}" services: foo: image: busybox @@ -196,6 +196,10 @@ or a list: args: - buildno=1 - gitcommithash=cdc3b19 + +> **Note**: In your Dockerfile, if you specify `ARG` before the `FROM` instruction, +> If you need an argument to be available in both places, also specify it under the `FROM` instruction. 
+> See [Understand how ARGS and FROM interact](/engine/reference/builder/#understand-how-arg-and-from-interact) for usage details. You can omit the value when specifying a build argument, in which case its value at build time is the value in the environment where Compose is running. @@ -389,7 +393,7 @@ Express dependency between services, which has two effects: Simple example: - version: '2' + version: "{{ site.compose_file_v2 }}" services: web: build: . @@ -414,7 +418,7 @@ the healthcheck) before starting. Example: - version: '2.1' + version: "{{ site.compose_file_v2 }}" services: web: build: . @@ -663,7 +667,7 @@ details. A full example: ``` -version: '2' +version: "{{ site.compose_file_v2 }}" services: myservice: image: alpine @@ -734,21 +738,19 @@ options and tags it with the specified tag. > [Added in version 2.2 file format](compose-versioning.md#version-22). Run an init inside the container that forwards signals and reaps processes. -Either set a boolean value to use the default `init`, or specify a path to -a custom one. +Set this option to `true` to enable this feature for the service. - version: '2.2' + version: "{{ site.compose_file_v2 }}" services: web: image: alpine:latest init: true +> The default init binary that is used is [Tini](https://github.com/krallin/tini), +> and is installed in `/usr/libexec/docker-init` on the daemon host. You can +> configure the daemon to use a custom init binary through the +> [`init-path` configuration option](/engine/reference/commandline/dockerd/#daemon-configuration-file). - version: '2.2' - services: - web: - image: alpine:latest - init: /usr/libexec/docker-init ### isolation @@ -881,7 +883,7 @@ The general format is shown here. In the example below, three services are provided (`web`, `worker`, and `db`), along with two networks (`new` and `legacy`). The `db` service is reachable at the hostname `db` or `database` on the `new` network, and at `db` or `mysql` on the `legacy` network. 
- version: '2' + version: "{{ site.compose_file_v2 }}" services: web: @@ -916,7 +918,7 @@ The corresponding network configuration in the [top-level networks section](#net An example: - version: '2.1' + version: "{{ site.compose_file_v2 }}" services: app: @@ -950,7 +952,7 @@ managed by docker (IPAM driver). Example usage: - version: '2.1' + version: "{{ site.compose_file_v2 }}" services: app: image: busybox @@ -973,7 +975,7 @@ In the following example, the `app` service connects to `app_net_1` first as it has the highest priority. It then connects to `app_net_3`, then `app_net_2`, which uses the default priority value of `0`. - version: '2.3' + version: "{{ site.compose_file_v2 }}" services: app: image: busybox @@ -1212,7 +1214,7 @@ expressed in the short form. ```none -version: "2.3" +version: "{{ site.compose_file_v2 }}" services: web: image: nginx:alpine @@ -1376,7 +1378,7 @@ Here's an example of a two-service setup where a database's data directory is shared with another service as a volume so that it can be periodically backed up: - version: "2.2" + version: "{{ site.compose_file_v2 }}" services: db: @@ -1429,7 +1431,7 @@ In the example below, instead of attempting to create a volume called `[projectname]_data`, Compose looks for an existing volume simply called `data` and mount it into the `db` service's containers. - version: '2' + version: "{{ site.compose_file_v2 }}" services: db: @@ -1480,14 +1482,14 @@ conflicting with those used by other software. Set a custom name for this volume. - version: '2.1' + version: "{{ site.compose_file_v2 }}" volumes: data: name: my-app-data It can also be used in conjunction with the `external` property: - version: '2.1' + version: "{{ site.compose_file_v2 }}" volumes: data: external: true @@ -1605,7 +1607,7 @@ attempting to create a network called `[projectname]_outside`, Compose looks for an existing network simply called `outside` and connect the `proxy` service's containers to it. 
- version: '2' + version: "{{ site.compose_file_v2 }}" services: proxy: @@ -1640,14 +1642,14 @@ Not supported for version 2 `docker-compose` files. Use Set a custom name for this network. - version: '2.1' + version: "{{ site.compose_file_v2 }}" networks: network1: name: my-app-net It can also be used in conjunction with the `external` property: - version: '2.1' + version: "{{ site.compose_file_v2 }}" networks: network1: external: true diff --git a/compose/compose-file/compose-versioning.md b/compose/compose-file/compose-versioning.md index 74b714401e..724f50cf81 100644 --- a/compose/compose-file/compose-versioning.md +++ b/compose/compose-file/compose-versioning.md @@ -52,6 +52,12 @@ omitting a `version` key at the root of the YAML. be cross-compatible between Compose and the Docker Engine's [swarm mode](/engine/swarm/index.md). This is specified with a `version: '3'` or `version: '3.1'`, etc., entry at the root of the YAML. +> ### v2 and v3 Declaration +> +> **Note**: When specifying the Compose file version to use, make sure to +> specify both the _major_ and _minor_ numbers. If no minor version is given, +> `0` is used by default and not the latest minor version. + The [Compatibility Matrix](#compatibility-matrix) shows Compose file versions mapped to Docker Engine releases. @@ -121,9 +127,24 @@ discoverable at a hostname that's the same as the service name. This means [links](compose-file-v2.md#links) are largely unnecessary. For more details, see [Networking in Compose](compose-file-v2.md#networking.md). +> **Note**: When specifying the Compose file version to use, make sure to +> specify both the _major_ and _minor_ numbers. If no minor version is given, +> `0` is used by default and not the latest minor version. As a result, features added in +> later versions will not be supported. 
For example: +> +> ```yaml +> version: "2" +> ``` +> +> is equivalent to: +> +> ```yaml +> version: "2.0" +> ``` + Simple example: - version: '2' + version: "{{ site.compose_file_v2 }}" services: web: build: . @@ -136,7 +157,7 @@ Simple example: A more extended example, defining volumes and networks: - version: '2' + version: "{{ site.compose_file_v2 }}" services: web: build: . @@ -169,7 +190,7 @@ Several other options were added to support networking, such as: * The [`depends_on`](compose-file-v2.md#depends_on) option can be used in place of links to indicate dependencies between services and startup order. - version: '2' + version: "{{ site.compose_file_v2 }}" services: web: build: . @@ -259,6 +280,21 @@ the [upgrading](#upgrading) guide for how to migrate away from these. - Added: [deploy](/compose/compose-file/index.md#deploy) +> **Note**: When specifying the Compose file version to use, make sure to +> specify both the _major_ and _minor_ numbers. If no minor version is given, +> `0` is used by default and not the latest minor version. As a result, features added in +> later versions will not be supported. For example: +> +> ```yaml +> version: "3" +> ``` +> +> is equivalent to: +> +> ```yaml +> version: "3.0" +> ``` + ### Version 3.3 An upgrade of [version 3](#version-3) that introduces new parameters only @@ -327,7 +363,7 @@ several options have been removed: [top-level `volumes` option](/compose/compose-file/index.md#volume-configuration-reference) and specify the driver there. - version: "3" + version: "{{ site.compose_file_v3 }}" services: db: image: postgres @@ -422,7 +458,7 @@ It's more complicated if you're using particular configuration features: named volume called `data`, you must declare a `data` volume in your top-level `volumes` section. 
The whole file might look like this: - version: '2' + version: "{{ site.compose_file_v2 }}" services: db: image: postgres diff --git a/compose/compose-file/index.md b/compose/compose-file/index.md index 4a736aaaa2..35694eb04c 100644 --- a/compose/compose-file/index.md +++ b/compose/compose-file/index.md @@ -30,7 +30,7 @@ how to upgrade, see **[About versions and upgrading](compose-versioning.md)**.
    
    -version: "3"
    +version: "{{ site.compose_file_v3 }}"
     services:
     
       redis:
    @@ -181,8 +181,8 @@ Configuration options that are applied at build time.
     `build` can be specified either as a string containing a path to the build
     context:
     
    -```none
    -version: '3'
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     services:
       webapp:
         build: ./dir
    @@ -191,8 +191,8 @@ services:
     Or, as an object with the path specified under [context](#context) and
     optionally [Dockerfile](#dockerfile) and [args](#args):
     
    -```none
    -version: '3'
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     services:
       webapp:
         build:
    @@ -205,8 +205,10 @@ services:
     If you specify `image` as well as `build`, then Compose names the built image
     with the `webapp` and optional `tag` specified in `image`:
     
    -    build: ./dir
    -    image: webapp:tag
    +```yaml
    +build: ./dir
    +image: webapp:tag
    +```
     
     This results in an image named `webapp` and tagged `tag`, built from `./dir`.
     
    @@ -225,8 +227,10 @@ sent to the Docker daemon.
     Compose builds and tags it with a generated name, and uses that image
     thereafter.
     
    -    build:
    -      context: ./dir
    +```yaml
    +build:
    +  context: ./dir
    +```
     
     #### dockerfile
     
    @@ -235,9 +239,11 @@ Alternate Dockerfile.
     Compose uses an alternate file to build with. A build path must also be
     specified.
     
    -    build:
    -      context: .
    -      dockerfile: Dockerfile-alternate
    +```yaml
    +build:
    +  context: .
    +  dockerfile: Dockerfile-alternate
    +```
     
     #### args
     
    @@ -246,33 +252,46 @@ build process.
     
     First, specify the arguments in your Dockerfile:
     
    -    ARG buildno
    -    ARG gitcommithash
    +```Dockerfile
    +ARG buildno
    +ARG gitcommithash
     
    -    RUN echo "Build number: $buildno"
    -    RUN echo "Based on commit: $gitcommithash"
    +RUN echo "Build number: $buildno"
    +RUN echo "Based on commit: $gitcommithash"
    +```
     
     Then specify the arguments under the `build` key. You can pass a mapping
     or a list:
     
    -    build:
    -      context: .
    -      args:
    -        buildno: 1
    -        gitcommithash: cdc3b19
    +```yaml
    +build:
    +  context: .
    +  args:
    +    buildno: 1
    +    gitcommithash: cdc3b19
    +```
     
    -    build:
    -      context: .
    -      args:
    -        - buildno=1
    -        - gitcommithash=cdc3b19
    +```yaml
    +build:
    +  context: .
    +  args:
    +    - buildno=1
    +    - gitcommithash=cdc3b19
    +```
    +
    +> **Note**: In your Dockerfile, if you specify `ARG` before the `FROM` instruction, 
    +> `ARG` is not available in the build instructions under `FROM`.
    +> If you need an argument to be available in both places, also specify it under the `FROM` instruction.
     +> See [Understand how ARG and FROM interact](/engine/reference/builder/#understand-how-arg-and-from-interact) for usage details.
     
     You can omit the value when specifying a build argument, in which case its value
     at build time is the value in the environment where Compose is running.
     
    -    args:
    -      - buildno
    -      - gitcommithash
    +```yaml
    +args:
    +  - buildno
    +  - gitcommithash
    +```
     
     > **Note**: YAML boolean values (`true`, `false`, `yes`, `no`, `on`, `off`) must
     > be enclosed in quotes, so that the parser interprets them as strings.
    @@ -283,11 +302,13 @@ at build time is the value in the environment where Compose is running.
     
     A list of images that the engine uses for cache resolution.
     
    -    build:
    -      context: .
    -      cache_from:
    -        - alpine:latest
    -        - corp/web_app:3.14
    +```yaml
    +build:
    +  context: .
    +  cache_from:
    +    - alpine:latest
    +    - corp/web_app:3.14
    +```
     
     #### labels
     
    @@ -299,20 +320,23 @@ You can use either an array or a dictionary.
     We recommend that you use reverse-DNS notation to prevent your labels from conflicting with
     those used by other software.
     
    -    build:
    -      context: .
    -      labels:
    -        com.example.description: "Accounting webapp"
    -        com.example.department: "Finance"
    -        com.example.label-with-empty-value: ""
    +```yaml
    +build:
    +  context: .
    +  labels:
    +    com.example.description: "Accounting webapp"
    +    com.example.department: "Finance"
    +    com.example.label-with-empty-value: ""
    +```
     
    -
    -    build:
    -      context: .
    -      labels:
    -        - "com.example.description=Accounting webapp"
    -        - "com.example.department=Finance"
    -        - "com.example.label-with-empty-value"
    +```yaml
    +build:
    +  context: .
    +  labels:
    +    - "com.example.description=Accounting webapp"
    +    - "com.example.department=Finance"
    +    - "com.example.label-with-empty-value"
    +```
     
     #### shm_size
     
    @@ -322,14 +346,17 @@ Set the size of the `/dev/shm` partition for this build's containers. Specify
     as an integer value representing the number of bytes or as a string expressing
     a [byte value](#specifying-byte-values).
     
    -    build:
    -      context: .
    -      shm_size: '2gb'
    +```yaml
    +build:
    +  context: .
    +  shm_size: '2gb'
    +```
     
    -
    -    build:
    -      context: .
    -      shm_size: 10000000
    +```yaml
    +build:
    +  context: .
    +  shm_size: 10000000
    +```
     
     #### target
     
    @@ -339,21 +366,25 @@ Build the specified stage as defined inside the `Dockerfile`. See the
     [multi-stage build docs](/engine/userguide/eng-image/multistage-build.md) for
     details.
     
    -      build:
    -        context: .
    -        target: prod
    +```yaml
    +build:
    +  context: .
    +  target: prod
    +```
     
     ### cap_add, cap_drop
     
     Add or drop container capabilities.
     See `man 7 capabilities` for a full list.
     
    -    cap_add:
    -      - ALL
    +```yaml
    +cap_add:
    +  - ALL
     
    -    cap_drop:
    -      - NET_ADMIN
    -      - SYS_ADMIN
    +cap_drop:
    +  - NET_ADMIN
    +  - SYS_ADMIN
    +```
     
     > **Note**: These options are ignored when
     > [deploying a stack in swarm mode](/engine/reference/commandline/stack_deploy.md)
    @@ -363,7 +394,9 @@ See `man 7 capabilities` for a full list.
     
     Specify an optional parent cgroup for the container.
     
    -    cgroup_parent: m-executor-abcd
    +```yaml
    +cgroup_parent: m-executor-abcd
    +```
     
     > **Note**: This option is ignored when
     > [deploying a stack in swarm mode](/engine/reference/commandline/stack_deploy.md)
    @@ -373,12 +406,16 @@ Specify an optional parent cgroup for the container.
     
     Override the default command.
     
    -    command: bundle exec thin -p 3000
    +```yaml
    +command: bundle exec thin -p 3000
    +```
     
     The command can also be a list, in a manner similar to
     [dockerfile](/engine/reference/builder.md#cmd):
     
    -    command: ["bundle", "exec", "thin", "-p", "3000"]
    +```yaml
    +command: ["bundle", "exec", "thin", "-p", "3000"]
    +```
     
     ### configs
     
    @@ -409,8 +446,8 @@ the stack deployment fails with a `config not found` error.
     > **Note**: `config` definitions are only supported in version 3.3 and higher
     >  of the compose file format.
     
    -```none
    -version: "3.3"
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     services:
       redis:
         image: redis:latest
    @@ -451,8 +488,8 @@ container, sets the mode to `0440` (group-readable) and sets the user and group
     to `103`. The `redis` service does not have access to the `my_other_config`
     config.
     
    -```none
    -version: "3.3"
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     services:
       redis:
         image: redis:latest
    @@ -478,7 +515,9 @@ short syntax. Defining a config does not imply granting a service access to it.
     
     Specify a custom container name, rather than a generated default name.
     
    -    container_name: my-web-container
    +```yaml
    +container_name: my-web-container
    +```
     
     Because Docker container names must be unique, you cannot scale a service beyond
     1 container if you have specified a custom name. Attempting to do so results in
    @@ -501,8 +540,10 @@ subdirectory in the Docker data directory, which defaults to `C:\ProgramData\Doc
     on Windows. The following example loads the credential spec from a file named
     `C:\ProgramData\Docker\CredentialSpecs\my-credential-spec.json`:
     
    -    credential_spec:
    -      file: my-credential-spec.json
    +```yaml
    +credential_spec:
    +  file: my-credential-spec.json
    +```
     
     When using `registry:`, the credential spec is read from the Windows registry on
     the daemon's host. A registry value with the given name must be located in:
    @@ -512,8 +553,10 @@ the daemon's host. A registry value with the given name must be located in:
     The following example load the credential spec from a value named `my-credential-spec`
     in the registry:
     
    -    credential_spec:
    -      registry: my-credential-spec
    +```yaml
    +credential_spec:
    +  registry: my-credential-spec
    +```
     
     ### depends_on
     
    @@ -526,20 +569,25 @@ behaviors:
     - `docker-compose up SERVICE` automatically includes `SERVICE`'s
       dependencies. In the following example, `docker-compose up web` also
       creates and starts `db` and `redis`.
    +  
    +- `docker-compose stop` stops services in dependency order. In the following
    +  example, `web` is stopped before `db` and `redis`.
     
     Simple example:
     
    -    version: '3'
    -    services:
    -      web:
    -        build: .
    -        depends_on:
    -          - db
    -          - redis
    -      redis:
    -        image: redis
    -      db:
    -        image: postgres
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
    +services:
    +  web:
    +    build: .
    +    depends_on:
    +      - db
    +      - redis
    +  redis:
    +    image: redis
    +  db:
    +    image: postgres
    +```
     
     > There are several things to be aware of when using `depends_on`:
     >
    @@ -563,18 +611,19 @@ only takes effect when deploying to a [swarm](/engine/swarm/index.md) with
     [docker stack deploy](/engine/reference/commandline/stack_deploy.md), and is
     ignored by `docker-compose up` and `docker-compose run`.
     
    -    version: '3'
    -    services:
    -      redis:
    -        image: redis:alpine
    -        deploy:
    -          replicas: 6
    -          update_config:
    -            parallelism: 2
    -            delay: 10s
    -          restart_policy:
    -            condition: on-failure
    -
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
    +services:
    +  redis:
    +    image: redis:alpine
    +    deploy:
    +      replicas: 6
    +      update_config:
    +        parallelism: 2
    +        delay: 10s
    +      restart_policy:
    +        condition: on-failure
    +```
     
     Several sub-options are available:
     
    @@ -598,8 +647,8 @@ and the client connects directly to one of these. DNS round-robin is useful
     in cases where you want to use your own load balancer, or for Hybrid
     Windows and Linux applications.
     
    -```none
    -version: "3.3"
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     
     services:
       wordpress:
    @@ -647,23 +696,26 @@ mode topics.
     Specify labels for the service. These labels are *only* set on the service,
     and *not* on any containers for the service.
     
    -    version: "3"
    -    services:
    -      web:
    -        image: web
    -        deploy:
    -          labels:
    -            com.example.description: "This label will appear on the web service"
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
    +services:
    +  web:
    +    image: web
    +    deploy:
    +      labels:
    +        com.example.description: "This label will appear on the web service"
    +```
     
     To set labels on containers instead, use the `labels` key outside of `deploy`:
     
    -    version: "3"
    -    services:
    -      web:
    -        image: web
    -        labels:
    -          com.example.description: "This label will appear on all containers for the web service"
    -
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
    +services:
    +  web:
    +    image: web
    +    labels:
    +      com.example.description: "This label will appear on all containers for the web service"
    +```
     
     #### mode
     
    @@ -674,44 +726,50 @@ services](/engine/swarm/how-swarm-mode-works/services/#replicated-and-global-ser
     in the [swarm](/engine/swarm/) topics.)
     
     
    -    version: '3'
    -    services:
    -      worker:
    -        image: dockersamples/examplevotingapp_worker
    -        deploy:
    -          mode: global
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
    +services:
    +  worker:
    +    image: dockersamples/examplevotingapp_worker
    +    deploy:
    +      mode: global
    +```
     
     #### placement
     
     Specify placement of constraints and preferences. See the docker service create documentation for a full description of the syntax and available types of [constraints](/engine/reference/commandline/service_create.md#specify-service-constraints-constraint) and [preferences](/engine/reference/commandline/service_create.md#specify-service-placement-preferences-placement-pref).
     
    -    version: '3.3'
    -    services:
    -      db:
    -        image: postgres
    -        deploy:
    -          placement:
    -            constraints:
    -              - node.role == manager
    -              - engine.labels.operatingsystem == ubuntu 14.04
    -            preferences:
    -              - spread: node.labels.zone
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
    +services:
    +  db:
    +    image: postgres
    +    deploy:
    +      placement:
    +        constraints:
    +          - node.role == manager
    +          - engine.labels.operatingsystem == ubuntu 14.04
    +        preferences:
    +          - spread: node.labels.zone
    +```
     
     #### replicas
     
     If the service is `replicated` (which is the default), specify the number of
     containers that should be running at any given time.
     
    -    version: '3'
    -    services:
    -      worker:
    -        image: dockersamples/examplevotingapp_worker
    -        networks:
    -          - frontend
    -          - backend
    -        deploy:
    -          mode: replicated
    -          replicas: 6
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
    +services:
    +  worker:
    +    image: dockersamples/examplevotingapp_worker
    +    networks:
    +      - frontend
    +      - backend
    +    deploy:
    +      mode: replicated
    +      replicas: 6
    +```
     
     #### resources
     
    @@ -726,11 +784,11 @@ Each of these is a single value, analogous to its [docker service
     create](/engine/reference/commandline/service_create.md) counterpart.
     
     In this general example, the `redis` service is constrained to use no more than
    -50M of memory and `0.50` (50%) of available processing time (CPU), and has
    -`20M` of memory and `0.25` CPU time reserved (as always available to it).
    +50M of memory and `0.50` (50% of a single core) of available processing time (CPU), 
    +and has `20M` of memory and `0.25` CPU time reserved (as always available to it).
     
    -```none
    -version: '3'
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     services:
       redis:
         image: redis:alpine
    @@ -785,8 +843,8 @@ Configures if and how to restart containers when they exit. Replaces
       specified as a [duration](#specifying-durations) (default:
       decide immediately).
     
    -```none
    -version: "3"
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     services:
       redis:
         image: redis:alpine
    @@ -828,8 +886,8 @@ updates.
     > **Note**: `order` is only supported for v3.4 and higher of the compose
     file format.
     
    -```none
    -version: '3.4'
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     services:
       vote:
         image: dockersamples/examplevotingapp_vote:before
    @@ -857,7 +915,6 @@ The following sub-options (supported for `docker-compose up` and `docker-compose
     - [network_mode](#network_mode)
     - [restart](#restart)
     - [security_opt](#security_opt)
    -- [stop_signal](#stop_signal)
     - [sysctls](#sysctls)
     - [userns_mode](#userns_mode)
     
    @@ -873,8 +930,10 @@ access to the requisite volumes.
     List of device mappings.  Uses the same format as the `--device` docker
     client create option.
     
    -    devices:
    -      - "/dev/ttyUSB0:/dev/ttyUSB0"
    +```yaml
    +devices:
    +  - "/dev/ttyUSB0:/dev/ttyUSB0"
    +```
     
     > **Note**: This option is ignored when
     > [deploying a stack in swarm mode](/engine/reference/commandline/stack_deploy.md)
    @@ -897,17 +956,19 @@ behaviors:
     
     Simple example:
     
    -    version: '3'
    -    services:
    -      web:
    -        build: .
    -        depends_on:
    -          - db
    -          - redis
    -      redis:
    -        image: redis
    -      db:
    -        image: postgres
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
    +services:
    +  web:
    +    build: .
    +    depends_on:
    +      - db
    +      - redis
    +  redis:
    +    image: redis
    +  db:
    +    image: postgres
    +```
     
     > There are several things to be aware of when using `depends_on`:
     >
    @@ -926,36 +987,50 @@ Simple example:
     
     Custom DNS servers. Can be a single value or a list.
     
    -    dns: 8.8.8.8
    -    dns:
    -      - 8.8.8.8
    -      - 9.9.9.9
    +```yaml
    +dns: 8.8.8.8
    +```
    +
    +```yaml
    +dns:
    +  - 8.8.8.8
    +  - 9.9.9.9
    +```
     
     ### dns_search
     
     Custom DNS search domains. Can be a single value or a list.
     
    -    dns_search: example.com
    -    dns_search:
    -      - dc1.example.com
    -      - dc2.example.com
    +```yaml
    +dns_search: example.com
    +```
    +
    +```yaml
    +dns_search:
    +  - dc1.example.com
    +  - dc2.example.com
    +```
     
     ### entrypoint
     
     Override the default entrypoint.
     
    -    entrypoint: /code/entrypoint.sh
    +```yaml
    +entrypoint: /code/entrypoint.sh
    +```
     
     The entrypoint can also be a list, in a manner similar to
     [dockerfile](/engine/reference/builder.md#entrypoint):
     
    -    entrypoint:
    -        - php
    -        - -d
    -        - zend_extension=/usr/local/lib/php/extensions/no-debug-non-zts-20100525/xdebug.so
    -        - -d
    -        - memory_limit=-1
    -        - vendor/bin/phpunit
    +```yaml
    +entrypoint:
    +    - php
    +    - -d
    +    - zend_extension=/usr/local/lib/php/extensions/no-debug-non-zts-20100525/xdebug.so
    +    - -d
    +    - memory_limit=-1
    +    - vendor/bin/phpunit
    +```
     
     > **Note**: Setting `entrypoint` both overrides any default entrypoint set
     > on the service's image with the `ENTRYPOINT` Dockerfile instruction, *and*
    @@ -973,19 +1048,25 @@ Environment variables declared in the [environment](#environment) section
     _override_ these values – this holds true even if those values are
     empty or undefined.
     
    -    env_file: .env
    +```yaml
    +env_file: .env
    +```
     
    -    env_file:
    -      - ./common.env
    -      - ./apps/web.env
    -      - /opt/secrets.env
    +```yaml
    +env_file:
    +  - ./common.env
    +  - ./apps/web.env
    +  - /opt/secrets.env
    +```
     
     Compose expects each line in an env file to be in `VAR=VAL` format. Lines
     beginning with `#` are treated as comments and are ignored. Blank lines are
     also ignored.
     
    -    # Set Rails/Rack environment
    -    RACK_ENV=development
    +```bash
    +# Set Rails/Rack environment
    +RACK_ENV=development
    +```
     
     > **Note**: If your service specifies a [build](#build) option, variables
     > defined in environment files are _not_ automatically visible during the
    @@ -1003,7 +1084,7 @@ list are processed from the top down. For the same variable specified in file
     listed below (after), then the value from `b.env` stands. For example, given the
     following declaration in `docker-compose.yml`:
     
    -```none
    +```yaml
     services:
       some-service:
         env_file:
    @@ -1013,19 +1094,19 @@ services:
     
     And the following files:
     
    -```none
    +```bash
     # a.env
     VAR=1
     ```
     
     and
     
    -```none
    +```bash
     # b.env
     VAR=hello
     ```
     
    -$VAR is `hello`.
    +`$VAR` is `hello`.
     
     ### environment
     
    @@ -1036,15 +1117,19 @@ they are not converted to True or False by the YML parser.
     Environment variables with only a key are resolved to their values on the
     machine Compose is running on, which can be helpful for secret or host-specific values.
     
    -    environment:
    -      RACK_ENV: development
    -      SHOW: 'true'
    -      SESSION_SECRET:
    +```yaml
    +environment:
    +  RACK_ENV: development
    +  SHOW: 'true'
    +  SESSION_SECRET:
    +```
     
    -    environment:
    -      - RACK_ENV=development
    -      - SHOW=true
    -      - SESSION_SECRET
    +```yaml
    +environment:
    +  - RACK_ENV=development
    +  - SHOW=true
    +  - SESSION_SECRET
    +```
     
     > **Note**: If your service specifies a [build](#build) option, variables
     > defined in `environment` are _not_ automatically visible during the
    @@ -1056,9 +1141,11 @@ machine Compose is running on, which can be helpful for secret or host-specific
     Expose ports without publishing them to the host machine - they'll only be
     accessible to linked services. Only the internal port can be specified.
     
    -    expose:
    -     - "3000"
    -     - "8000"
    +```yaml
    +expose:
    + - "3000"
    + - "8000"
    +```
     
     ### external_links
     
    @@ -1067,10 +1154,12 @@ Compose, especially for containers that provide shared or common services.
     `external_links` follow semantics similar to the legacy option `links` when
     specifying both the container name and the link alias (`CONTAINER:ALIAS`).
     
    -    external_links:
    -     - redis_1
    -     - project_db_1:mysql
    -     - project_db_1:postgresql
    +```yaml
    +external_links:
    + - redis_1
    + - project_db_1:mysql
    + - project_db_1:postgresql
    +```
     
     > **Notes:**
     >
    @@ -1086,14 +1175,18 @@ with a (version 3) Compose file.
     
     Add hostname mappings. Use the same values as the docker client `--add-host` parameter.
     
    -    extra_hosts:
    -     - "somehost:162.242.195.82"
    -     - "otherhost:50.31.209.229"
    +```yaml
    +extra_hosts:
    + - "somehost:162.242.195.82"
    + - "otherhost:50.31.209.229"
    +```
     
     An entry with the ip address and hostname is created in `/etc/hosts` inside containers for this service, e.g:
     
    -    162.242.195.82  somehost
    -    50.31.209.229   otherhost
    +```none
    +162.242.195.82  somehost
    +50.31.209.229   otherhost
    +```
     
     ### healthcheck
     
    @@ -1104,12 +1197,14 @@ service are "healthy". See the docs for the
     [HEALTHCHECK Dockerfile instruction](/engine/reference/builder.md#healthcheck)
     for details on how healthchecks work.
     
    -    healthcheck:
    -      test: ["CMD", "curl", "-f", "http://localhost"]
    -      interval: 1m30s
    -      timeout: 10s
    -      retries: 3
    -      start_period: 40s
    +```yaml
    +healthcheck:
    +  test: ["CMD", "curl", "-f", "http://localhost"]
    +  interval: 1m30s
    +  timeout: 10s
    +  retries: 3
    +  start_period: 40s
    +```
     
     `interval`, `timeout` and `start_period` are specified as [durations](#specifying-durations).
     
    @@ -1120,18 +1215,28 @@ file format.
     either `NONE`, `CMD` or `CMD-SHELL`. If it's a string, it's equivalent to
     specifying `CMD-SHELL` followed by that string.
     
    -    # Hit the local web app
    -    test: ["CMD", "curl", "-f", "http://localhost"]
    +```yaml
    +# Hit the local web app
    +test: ["CMD", "curl", "-f", "http://localhost"]
    +```
     
    -    # As above, but wrapped in /bin/sh. Both forms below are equivalent.
    -    test: ["CMD-SHELL", "curl -f http://localhost || exit 1"]
    -    test: curl -f https://localhost || exit 1
    +As above, but wrapped in `/bin/sh`. Both forms below are equivalent.
    +
    +```yaml
    +test: ["CMD-SHELL", "curl -f http://localhost || exit 1"]
    +```
    +
    +```yaml
     +test: curl -f http://localhost || exit 1
    +```
     
     To disable any default healthcheck set by the image, you can use `disable:
     true`. This is equivalent to specifying `test: ["NONE"]`.
     
    -    healthcheck:
    -      disable: true
    +```yaml
    +healthcheck:
    +  disable: true
    +```
     
     ### image
     
    @@ -1153,21 +1258,20 @@ options and tags it with the specified tag.
     > [Added in version 3.7 file format](compose-versioning.md#version-37).
     
     Run an init inside the container that forwards signals and reaps processes.
    -Either set a boolean value to use the default `init`, or specify a path to
    -a custom one.
    +Set this option to `true` to enable this feature for the service.
     
    -    version: '3.7'
    -    services:
    -      web:
    -        image: alpine:latest
    -        init: true
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
    +services:
    +  web:
    +    image: alpine:latest
    +    init: true
    +```
     
    -
    -    version: '2.2'
    -    services:
    -      web:
    -        image: alpine:latest
    -        init: /usr/libexec/docker-init
    +> The default init binary that is used is [Tini](https://github.com/krallin/tini),
    +> and is installed in `/usr/libexec/docker-init` on the daemon host. You can
    +> configure the daemon to use a custom init binary through the
    +> [`init-path` configuration option](/engine/reference/commandline/dockerd/#daemon-configuration-file).
     
     ### isolation
     
    @@ -1183,15 +1287,19 @@ Add metadata to containers using [Docker labels](/engine/userguide/labels-custom
     
     It's recommended that you use reverse-DNS notation to prevent your labels from conflicting with those used by other software.
     
    -    labels:
    -      com.example.description: "Accounting webapp"
    -      com.example.department: "Finance"
    -      com.example.label-with-empty-value: ""
    +```yaml
    +labels:
    +  com.example.description: "Accounting webapp"
    +  com.example.department: "Finance"
    +  com.example.label-with-empty-value: ""
    +```
     
    -    labels:
    -      - "com.example.description=Accounting webapp"
    -      - "com.example.department=Finance"
    -      - "com.example.label-with-empty-value"
    +```yaml
    +labels:
    +  - "com.example.description=Accounting webapp"
    +  - "com.example.department=Finance"
    +  - "com.example.label-with-empty-value"
    +```
     
     ### links
     
    @@ -1208,11 +1316,13 @@ containers in a more controlled way.
     Link to containers in another service. Either specify both the service name and
     a link alias (`SERVICE:ALIAS`), or just the service name.
     
    -    web:
    -      links:
    -       - db
    -       - db:database
    -       - redis
    +```yaml
    +web:
    +  links:
    +   - db
    +   - db:database
    +   - redis
    +```
     
     Containers for the linked service are reachable at a hostname identical to
     the alias, or the service name if no alias was specified.
    @@ -1238,10 +1348,12 @@ Links also express dependency between services in the same way as
     
     Logging configuration for the service.
     
    -    logging:
    -      driver: syslog
    -      options:
    -        syslog-address: "tcp://192.168.0.42:123"
    +```yaml
    +logging:
    +  driver: syslog
    +  options:
    +    syslog-address: "tcp://192.168.0.42:123"
    +```
     
     The `driver`  name specifies a logging driver for the service's
     containers, as with the ``--log-driver`` option for docker run
    @@ -1261,15 +1373,19 @@ Specify logging options for the logging driver with the ``options`` key, as with
     
     Logging options are key-value pairs. An example of `syslog` options:
     
    -    driver: "syslog"
    -    options:
    -      syslog-address: "tcp://192.168.0.42:123"
    +```yaml
    +driver: "syslog"
    +options:
    +  syslog-address: "tcp://192.168.0.42:123"
    +```
     
     The default driver [json-file](/engine/admin/logging/overview.md#json-file), has options to limit the amount of logs stored. To do this, use a key-value pair for maximum storage size and maximum number of files:
     
    -    options:
    -      max-size: "200k"
    -      max-file: "10"
    +```yaml
    +options:
    +  max-size: "200k"
    +  max-file: "10"
    +```
     
     The example shown above would store log files until they reach a `max-size` of
     200kB, and then rotate them. The amount of individual log files stored is
    @@ -1278,14 +1394,17 @@ files are removed to allow storage of new logs.
     
     Here is an example `docker-compose.yml` file that limits logging storage:
     
    -    services:
    -      some-service:
    -        image: some-service
    -        logging:
    -          driver: "json-file"
    -          options:
    -            max-size: "200k"
    -            max-file: "10"
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
    +services:
    +  some-service:
    +    image: some-service
    +    logging:
    +      driver: "json-file"
    +      options:
    +        max-size: "200k"
    +        max-file: "10"
    +```
     
     > Logging options available depend on which logging driver you use
     >
    @@ -1320,11 +1439,13 @@ the special form `service:[service name]`.
     Networks to join, referencing entries under the
     [top-level `networks` key](#network-configuration-reference).
     
    -    services:
    -      some-service:
    -        networks:
    -         - some-network
    -         - other-network
    +```yaml
    +services:
    +  some-service:
    +    networks:
    +     - some-network
    +     - other-network
    +```
     
     #### aliases
     
    @@ -1336,48 +1457,52 @@ Since `aliases` is network-scoped, the same service can have different aliases o
     
     The general format is shown here.
     
    -    services:
    -      some-service:
    -        networks:
    -          some-network:
    -            aliases:
    -             - alias1
    -             - alias3
    -          other-network:
    -            aliases:
    -             - alias2
    +```yaml
    +services:
    +  some-service:
    +    networks:
    +      some-network:
    +        aliases:
    +         - alias1
    +         - alias3
    +      other-network:
    +        aliases:
    +         - alias2
    +```
     
     In the example below, three services are provided (`web`, `worker`, and `db`),
     along with two networks (`new` and `legacy`). The `db` service is reachable at
     the hostname `db` or `database` on the `new` network, and at `db` or `mysql` on
     the `legacy` network.
     
    -    version: '2'
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     
    -    services:
    -      web:
    -        build: ./web
    -        networks:
    -          - new
    +services:
    +  web:
    +    image: "nginx:alpine"
    +    networks:
    +      - new
     
    -      worker:
    -        build: ./worker
    -        networks:
    -          - legacy
    -
    -      db:
    -        image: mysql
    -        networks:
    -          new:
    -            aliases:
    -              - database
    -          legacy:
    -            aliases:
    -              - mysql
    +  worker:
    +    image: "my-worker-image:latest"
    +    networks:
    +      - legacy
     
    +  db:
    +    image: mysql
         networks:
           new:
    +        aliases:
    +          - database
           legacy:
    +        aliases:
    +          - mysql
    +
    +networks:
    +  new:
    +  legacy:
    +```
     
     #### ipv4_address, ipv6_address
     
    @@ -1385,21 +1510,20 @@ Specify a static IP address for containers for this service when joining the net
     
     The corresponding network configuration in the
     [top-level networks section](#network-configuration-reference) must have an
    -`ipam` block with subnet configurations covering each static address. If IPv6
    -addressing is desired, the [`enable_ipv6`](#enableipv6) option must be set, and
    -you must use a version 2.x Compose file, such as the one below.
    +`ipam` block with subnet configurations covering each static address.
     
    -> **Note**: These options do not currently work in swarm mode.
    +> If IPv6 addressing is desired, the [`enable_ipv6`](compose-file-v2.md#enable_ipv6)
    +> option must be set, and you must use a [version 2.x Compose file](compose-file-v2.md#ipv4_address-ipv6_address).
    +> _IPv6 options do not currently work in swarm mode_.
     
     An example:
     
     ```yaml
    -version: '2.1'
    +version: "{{ site.compose_file_v3 }}"
     
     services:
       app:
    -    image: busybox
    -    command: ifconfig
    +    image: nginx:alpine
         networks:
           app_net:
             ipv4_address: 172.16.238.10
    @@ -1407,15 +1531,11 @@ services:
     
     networks:
       app_net:
    -    driver: bridge
    -    enable_ipv6: true
         ipam:
           driver: default
           config:
    -      -
    -        subnet: 172.16.238.0/24
    -      -
    -        subnet: 2001:3984:3989::/64
    +        - subnet: "172.16.238.0/24"
    +        - subnet: "2001:3984:3989::/64"
     ```
     
     ### pid
    @@ -1443,15 +1563,17 @@ port (an ephemeral host port is chosen).
     > parses numbers in the format `xx:yy` as a base-60 value. For this reason,
     > we recommend always explicitly specifying your port mappings as strings.
     
    -    ports:
    -     - "3000"
    -     - "3000-3005"
    -     - "8000:8000"
    -     - "9090-9091:8080-8081"
    -     - "49100:22"
    -     - "127.0.0.1:8001:8001"
    -     - "127.0.0.1:5000-5010:5000-5010"
    -     - "6060:6060/udp"
    +```yaml
    +ports:
    + - "3000"
    + - "3000-3005"
    + - "8000:8000"
    + - "9090-9091:8080-8081"
    + - "49100:22"
    + - "127.0.0.1:8001:8001"
    + - "127.0.0.1:5000-5010:5000-5010"
    + - "6060:6060/udp"
    +```
     
     #### Long syntax
     
    @@ -1464,13 +1586,12 @@ expressed in the short form.
     - `mode`: `host` for publishing a host port on each node, or `ingress` for a swarm
        mode port to be load balanced.
     
    -```none
    +```yaml
     ports:
       - target: 80
         published: 8080
         protocol: tcp
         mode: host
    -
     ```
     
     > **Note**: The long syntax is new in v3.2
    @@ -1517,8 +1638,8 @@ already been defined in Docker, either by running the `docker secret create`
     command or by another stack deployment. If the external secret does not exist,
     the stack deployment fails with a `secret not found` error.
     
    -```none
    -version: "3.1"
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     services:
       redis:
         image: redis:latest
    @@ -1560,8 +1681,8 @@ container, sets the mode to `0440` (group-readable) and sets the user and group
     to `103`. The `redis` service does not have access to the `my_other_secret`
     secret.
     
    -```none
    -version: "3.1"
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     services:
       redis:
         image: redis:latest
    @@ -1587,9 +1708,11 @@ short syntax. Defining a secret does not imply granting a service access to it.
     
     Override the default labeling scheme for each container.
     
    -    security_opt:
    -      - label:user:USER
    -      - label:role:ROLE
    +```yaml
    +security_opt:
    +  - label:user:USER
    +  - label:role:ROLE
    +```
     
     > **Note**: This option is ignored when
     > [deploying a stack in swarm mode](/engine/reference/commandline/stack_deploy.md)
    @@ -1614,24 +1737,26 @@ Sets an alternative signal to stop the container. By default `stop` uses
     SIGTERM. Setting an alternative signal using `stop_signal` causes
     `stop` to send that signal instead.
     
    -    stop_signal: SIGUSR1
    -
    -> **Note**: This option is ignored when
    -> [deploying a stack in swarm mode](/engine/reference/commandline/stack_deploy.md)
    -> with a (version 3) Compose file.
    +```yaml
    +stop_signal: SIGUSR1
    +```
     
     ### sysctls
     
     Kernel parameters to set in the container. You can use either an array or a
     dictionary.
     
    -    sysctls:
    -      net.core.somaxconn: 1024
    -      net.ipv4.tcp_syncookies: 0
    +```yaml
    +sysctls:
    +  net.core.somaxconn: 1024
    +  net.ipv4.tcp_syncookies: 0
    +```
     
    -    sysctls:
    -      - net.core.somaxconn=1024
    -      - net.ipv4.tcp_syncookies=0
    +```yaml
    +sysctls:
    +  - net.core.somaxconn=1024
    +  - net.ipv4.tcp_syncookies=0
    +```
     
     > **Note**: This option is ignored when
     > [deploying a stack in swarm mode](/engine/reference/commandline/stack_deploy.md)
    @@ -1643,10 +1768,15 @@ dictionary.
     
     Mount a temporary file system inside the container. Can be a single value or a list.
     
    -    tmpfs: /run
    -    tmpfs:
    -      - /run
    -      - /tmp
    +```yaml
    +tmpfs: /run
    +```
    +
    +```yaml
    +tmpfs:
    +  - /run
    +  - /tmp
    +```
     
     > **Note**: This option is ignored when
     > [deploying a stack in swarm mode](/engine/reference/commandline/stack_deploy.md)
    @@ -1657,10 +1787,12 @@ Mount a temporary file system inside the container. Can be a single value or a l
     Mount a temporary file system inside the container. Size parameter specifies the size
     of the tmpfs mount in bytes. Unlimited by default.
     
    -     - type: tmpfs
    -         target: /app
    -         tmpfs:
    -           size: 1000
    +```yaml
    + - type: tmpfs
    +   target: /app
    +   tmpfs:
    +     size: 1000
    +```
     
     ### ulimits
     
    @@ -1668,15 +1800,19 @@ Override the default ulimits for a container. You can either specify a single
     limit as an integer or soft/hard limits as a mapping.
     
     
    -    ulimits:
    -      nproc: 65535
    -      nofile:
    -        soft: 20000
    -        hard: 40000
    +```yaml
    +ulimits:
    +  nproc: 65535
    +  nofile:
    +    soft: 20000
    +    hard: 40000
    +```
     
     ### userns_mode
     
    -    userns_mode: "host"
    +```yaml
    +userns_mode: "host"
    +```
     
     Disables the user namespace for this service, if Docker daemon is configured with user namespaces.
     See [dockerd](/engine/reference/commandline/dockerd.md#disable-user-namespace-for-a-container) for
    @@ -1710,8 +1846,8 @@ path under `db` service `volumes`), but defines it using the old string format
     for mounting a named volume. Named volumes must be listed under the top-level
     `volumes` key, as shown.
     
    -```none
    -version: "3.2"
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     services:
       web:
         image: nginx:alpine
    @@ -1749,22 +1885,23 @@ You can mount a relative path on the host, that expands relative to
     the directory of the Compose configuration file being used. Relative paths
     should always begin with `.` or `..`.
     
    -    volumes:
    -      # Just specify a path and let the Engine create a volume
    -      - /var/lib/mysql
    +```yaml
    +volumes:
    +  # Just specify a path and let the Engine create a volume
    +  - /var/lib/mysql
     
    -      # Specify an absolute path mapping
    -      - /opt/data:/var/lib/mysql
    +  # Specify an absolute path mapping
    +  - /opt/data:/var/lib/mysql
     
    -      # Path on the host, relative to the Compose file
    -      - ./cache:/tmp/cache
    +  # Path on the host, relative to the Compose file
    +  - ./cache:/tmp/cache
     
    -      # User-relative path
    -      - ~/configs:/etc/configs/:ro
    -
    -      # Named volume
    -      - datavolume:/var/lib/mysql
    +  # User-relative path
    +  - ~/configs:/etc/configs/:ro
     
    +  # Named volume
    +  - datavolume:/var/lib/mysql
    +```
     
     #### Long syntax
     
    @@ -1786,8 +1923,8 @@ expressed in the short form.
       - `size`: the size for the tmpfs mount in bytes
     - `consistency`: the consistency requirements of the mount, one of `consistent` (host and container have identical view), `cached` (read cache, host view is authoritative) or `delegated` (read-write cache, container's view is authoritative)
     
    -```none
    -version: "3.2"
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     services:
       web:
         image: nginx:alpine
    @@ -1834,8 +1971,8 @@ Labs](https://github.com/docker/labs/blob/master/beginner/chapters/votingapp.md)
     configured as a named volume to persist the data on the swarm,
     _and_ is constrained to run only on `manager` nodes. Here is the relevant snip-it from that file:
     
    -```none
    -version: "3"
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     services:
       db:
         image: postgres:9.4
    @@ -1870,8 +2007,8 @@ are visible on the host.
     
     Here is an example of configuring a volume as `cached`:
     
    -```none
    -version: '3'
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     services:
       php:
         image: php:7.1-fpm
    @@ -1888,24 +2025,25 @@ volume mounts (shared filesystems)](/docker-for-mac/osxfs-caching.md).
     ### domainname, hostname, ipc, mac\_address, privileged, read\_only, shm\_size, stdin\_open, tty, user, working\_dir
     
     Each of these is a single value, analogous to its
    -[docker run](/engine/reference/run.md) counterpart.
    +[docker run](/engine/reference/run.md) counterpart. Note that `mac_address` is a legacy option.
     
    -    user: postgresql
    -    working_dir: /code
    +```yaml
    +user: postgresql
    +working_dir: /code
     
    -    domainname: foo.com
    -    hostname: foo
    -    ipc: host
    -    mac_address: 02:42:ac:11:65:43
    +domainname: foo.com
    +hostname: foo
    +ipc: host
    +mac_address: 02:42:ac:11:65:43
     
    -    privileged: true
    +privileged: true
     
     
    -    read_only: true
    -    shm_size: 64M
    -    stdin_open: true
    -    tty: true
    -
    +read_only: true
    +shm_size: 64M
    +stdin_open: true
    +tty: true
    +```
     
     ## Specifying durations
     
    @@ -1954,20 +2092,22 @@ Here's an example of a two-service setup where a database's data directory is
     shared with another service as a volume so that it can be periodically backed
     up:
     
    -    version: "3"
    -
    -    services:
    -      db:
    -        image: db
    -        volumes:
    -          - data-volume:/var/lib/db
    -      backup:
    -        image: backup-service
    -        volumes:
    -          - data-volume:/var/lib/backup/data
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     
    +services:
    +  db:
    +    image: db
         volumes:
    -      data-volume:
    +      - data-volume:/var/lib/db
    +  backup:
    +    image: backup-service
    +    volumes:
    +      - data-volume:/var/lib/backup/data
    +
    +volumes:
    +  data-volume:
    +```
     
     An entry under the top-level `volumes` key can be empty, in which case it
     uses the default driver configured by the Engine (in most cases, this is the
    @@ -1980,7 +2120,9 @@ driver the Docker Engine has been configured to use, which in most cases is
     `local`. If the driver is not available, the Engine returns an error when
     `docker-compose up` tries to create the volume.
     
    -     driver: foobar
    +```yaml
    +driver: foobar
    +```
     
     ### driver_opts
     
    @@ -1988,12 +2130,14 @@ Specify a list of options as key-value pairs to pass to the driver for this
     volume. Those options are driver-dependent - consult the driver's
     documentation for more information. Optional.
     
    -    volumes:
    -      example:
    -        driver_opts:
    -          type: "nfs"
    -          o: "addr=10.40.0.199,nolock,soft,rw"
    -          device: ":/docker/example"
    +```yaml
    +volumes:
    +  example:
    +    driver_opts:
    +      type: "nfs"
    +      o: "addr=10.40.0.199,nolock,soft,rw"
    +      device: ":/docker/example"
    +```
     
     ### external
     
    @@ -2010,17 +2154,19 @@ In the example below, instead of attempting to create a volume called
     `[projectname]_data`, Compose looks for an existing volume simply
     called `data` and mount it into the `db` service's containers.
     
    -    version: '3'
    -
    -    services:
    -      db:
    -        image: postgres
    -        volumes:
    -          - data:/var/lib/postgresql/data
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     
    +services:
    +  db:
    +    image: postgres
         volumes:
    -      data:
    -        external: true
    +      - data:/var/lib/postgresql/data
    +
    +volumes:
    +  data:
    +    external: true
    +```
     
     > [external.name was deprecated in version 3.4 file format](compose-versioning.md#version-34)
     > use `name` instead.
    @@ -2028,10 +2174,12 @@ called `data` and mount it into the `db` service's containers.
     You can also specify the name of the volume separately from the name used to
     refer to it within the Compose file:
     
    -    volumes:
    -      data:
    -        external:
    -          name: actual-name-of-volume
    +```yaml
    +volumes:
    +  data:
    +    external:
    +      name: actual-name-of-volume
    +```
     
     > External volumes are always created with docker stack deploy
     >
    @@ -2053,15 +2201,19 @@ an array or a dictionary.
     It's recommended that you use reverse-DNS notation to prevent your labels from
     conflicting with those used by other software.
     
    -    labels:
    -      com.example.description: "Database volume"
    -      com.example.department: "IT/Ops"
    -      com.example.label-with-empty-value: ""
    +```yaml
    +labels:
    +  com.example.description: "Database volume"
    +  com.example.department: "IT/Ops"
    +  com.example.label-with-empty-value: ""
    +```
     
    -    labels:
    -      - "com.example.description=Database volume"
    -      - "com.example.department=IT/Ops"
    -      - "com.example.label-with-empty-value"
    +```yaml
    +labels:
    +  - "com.example.description=Database volume"
    +  - "com.example.department=IT/Ops"
    +  - "com.example.label-with-empty-value"
    +```
     
     ### name
     
    @@ -2071,18 +2223,22 @@ Set a custom name for this volume. The name field can be used to reference
     volumes that contain special characters. The name is used as is
     and will **not** be scoped with the stack name.
     
    -    version: '3.4'
    -    volumes:
    -      data:
    -        name: my-app-data
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
    +volumes:
    +  data:
    +    name: my-app-data
    +```
     
     It can also be used in conjunction with the `external` property:
     
    -    version: '3.4'
    -    volumes:
    -      data:
    -        external: true
    -        name: my-app-data
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
    +volumes:
    +  data:
    +    external: true
    +    name: my-app-data
    +```
     
     ## Network configuration reference
     
    @@ -2106,7 +2262,9 @@ Swarm.
     
     The Docker Engine returns an error if the driver is not available.
     
    -    driver: overlay
    +```yaml
    +driver: overlay
    +```
     
     #### bridge
     
    @@ -2135,16 +2293,19 @@ Use the host's networking stack, or no networking. Equivalent to
     `docker stack` commands. If you use the `docker-compose` command,
     use [network_mode](#network_mode) instead.
     
    -The syntax for using built-in networks like `host` and `none` is a little
    +If you want to use a particular network on a common build, use the `network` option
    +as mentioned in the second yaml file example.
    +
    +The syntax for using built-in networks such as `host` and `none` is a little
     different. Define an external network with the name `host` or `none` (that
     Docker has already created automatically) and an alias that Compose can use
    -(`hostnet` or `nonet` in these examples), then grant the service access to that
    -network, using the alias.
    +(`hostnet` or `nonet` in the following examples), then grant the service access to that
    +network using the alias.
     
     ```yaml
    +version: "{{ site.compose_file_v3 }}"
     services:
       web:
    -    ...
         networks:
           hostnet: {}
     
    @@ -2154,6 +2315,17 @@ networks:
         name: host
     ```
     
    +```yaml
    +services:
    +  web:
    +    ...
    +    build:
    +      ...
    +      network: host
    +      context: .
    +      ...
    +```
    +
     ```yaml
     services:
       web:
    @@ -2173,9 +2345,11 @@ Specify a list of options as key-value pairs to pass to the driver for this
     network. Those options are driver-dependent - consult the driver's
     documentation for more information. Optional.
     
    -      driver_opts:
    -        foo: "bar"
    -        baz: 1
    +```yaml
    +driver_opts:
    +  foo: "bar"
    +  baz: 1
    +```
     
     ### attachable
     
    @@ -2216,10 +2390,12 @@ which is optional:
     
     A full example:
     
    -    ipam:
    -      driver: default
    -      config:
    -        - subnet: 172.28.0.0/16
    +```yaml
    +ipam:
    +  driver: default
    +  config:
    +    - subnet: 172.28.0.0/16
    +```
     
     > **Note**: Additional IPAM configurations, such as `gateway`, are only honored for version 2 at the moment.
     
    @@ -2238,15 +2414,19 @@ an array or a dictionary.
     It's recommended that you use reverse-DNS notation to prevent your labels from
     conflicting with those used by other software.
     
    -    labels:
    -      com.example.description: "Financial transaction network"
    -      com.example.department: "Finance"
    -      com.example.label-with-empty-value: ""
    +```yaml
    +labels:
    +  com.example.description: "Financial transaction network"
    +  com.example.department: "Finance"
    +  com.example.label-with-empty-value: ""
    +```
     
    -    labels:
    -      - "com.example.description=Financial transaction network"
    -      - "com.example.department=Finance"
    -      - "com.example.label-with-empty-value"
    +```yaml
    +labels:
    +  - "com.example.description=Financial transaction network"
    +  - "com.example.department=Finance"
    +  - "com.example.label-with-empty-value"
    +```
     
     ### external
     
    @@ -2264,23 +2444,24 @@ attempting to create a network called `[projectname]_outside`, Compose
     looks for an existing network simply called `outside` and connect the `proxy`
     service's containers to it.
     
    -    version: '3'
    -
    -    services:
    -      proxy:
    -        build: ./proxy
    -        networks:
    -          - outside
    -          - default
    -      app:
    -        build: ./app
    -        networks:
    -          - default
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
     
    +services:
    +  proxy:
    +    build: ./proxy
         networks:
    -      outside:
    -        external: true
    +      - outside
    +      - default
    +  app:
    +    build: ./app
    +    networks:
    +      - default
     
    +networks:
    +  outside:
    +    external: true
    +```
     
     > [external.name was deprecated in version 3.5 file format](compose-versioning.md#version-35)
     > use `name` instead.
    @@ -2288,10 +2469,13 @@ service's containers to it.
     You can also specify the name of the network separately from the name used to
     refer to it within the Compose file:
     
    -    networks:
    -      outside:
    -        external:
    -          name: actual-name-of-network
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
    +networks:
    +  outside:
    +    external:
    +      name: actual-name-of-network
    +```
     
     ### name
     
    @@ -2301,18 +2485,22 @@ Set a custom name for this network. The name field can be used to reference
     networks which contain special characters. The name is used as is
     and will **not** be scoped with the stack name.
     
    -    version: '3.5'
    -    networks:
    -      network1:
    -        name: my-app-net
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
    +networks:
    +  network1:
    +    name: my-app-net
    +```
     
     It can also be used in conjunction with the `external` property:
     
    -    version: '3.5'
    -    networks:
    -      network1:
    -        external: true
    -        name: my-app-net
    +```yaml
    +version: "{{ site.compose_file_v3 }}"
    +networks:
    +  network1:
    +    external: true
    +    name: my-app-net
    +```
     
     ## configs configuration reference
     
    @@ -2334,7 +2522,7 @@ In this example, `my_first_config` is created (as
     `_my_first_config)`when the stack is deployed,
     and `my_second_config` already exists in Docker.
     
    -```none
    +```yaml
     configs:
       my_first_config:
         file: ./config_data
    @@ -2347,7 +2535,7 @@ is different from the name that exists within the service. The following
     example modifies the previous one to use the external config called
     `redis_config`.
     
    -```none
    +```yaml
     configs:
       my_first_config:
         file: ./config_data
    @@ -2381,7 +2569,7 @@ In this example, `my_first_secret` is created as
     `_my_first_secret `when the stack is deployed,
     and `my_second_secret` already exists in Docker.
     
    -```none
    +```yaml
     secrets:
       my_first_secret:
         file: ./secret_data
    @@ -2395,7 +2583,8 @@ example modifies the previous one to use the external secret called
     `redis_secret`.
     
     ### Compose File v3.5 and above
    -```none
    +
    +```yaml
     secrets:
       my_first_secret:
         file: ./secret_data
    @@ -2405,7 +2594,8 @@ secrets:
     ```
     
     ### Compose File v3.4 and under
    -```none    
    +
    +```yaml
       my_second_secret:
         external:
           name: redis_secret
    diff --git a/compose/environment-variables.md b/compose/environment-variables.md
    index 42680cce17..662eb23060 100644
    --- a/compose/environment-variables.md
    +++ b/compose/environment-variables.md
    @@ -53,7 +53,7 @@ the same variable in the shell in which Compose is run.
     ## The “env_file” configuration option
     
     You can pass multiple environment variables from an external file through to
    -a service's containers with the ['env_file' option](compose-file.md#envfile),
    +a service's containers with the ['env_file' option](compose-file.md#env_file),
     just like with `docker run --env-file=FILE ...`:
     
     ```yaml
    diff --git a/compose/extends.md b/compose/extends.md
    index 4a253012b6..0af8391a61 100644
    --- a/compose/extends.md
    +++ b/compose/extends.md
    @@ -44,7 +44,7 @@ relative to the base file.
     
     ### Example use case
     
    -In this section are two common use cases for multiple compose files: changing a
    +In this section, there are two common use cases for multiple Compose files: changing a
     Compose app for different environments, and running administrative tasks
     against a Compose app.
     
    diff --git a/compose/gettingstarted.md b/compose/gettingstarted.md
    index 822df6a0e9..35189c3ce4 100644
    --- a/compose/gettingstarted.md
    +++ b/compose/gettingstarted.md
    @@ -119,15 +119,18 @@ the following:
           redis:
             image: "redis:alpine"
     
    -This Compose file defines two services, `web` and `redis`. The `web` service:
    +This Compose file defines two services: `web` and `redis`. 
     
    -* Uses an image that's built from the `Dockerfile` in the current directory.
    -* Forwards the exposed port 5000 on the container to port 5000 on the host
    -  machine. We use the default port for the Flask web server, `5000`.
    +### Web service
     
    -The `redis` service uses a public
    -[Redis](https://registry.hub.docker.com/_/redis/) image pulled from the Docker
    -Hub registry.
    +The `web` service uses an image that's built from the `Dockerfile` in the current directory.
    +It then binds the container and the host machine to the exposed port, `5000`. This example service uses the default port for 
    +the Flask web server, `5000`.
    +
    +### Redis service
    +
    +The `redis` service uses a public [Redis](https://registry.hub.docker.com/_/redis/) 
    +image pulled from the Docker Hub registry.
     
     ## Step 4: Build and run your app with Compose
     
    diff --git a/compose/install.md b/compose/install.md
    index 4b190c3d6f..b739bf792e 100644
    --- a/compose/install.md
    +++ b/compose/install.md
    @@ -29,14 +29,26 @@ Follow the instructions below to install Compose on Mac, Windows, Windows Server
     2016, or Linux systems, or find out about alternatives like using the `pip`
     Python package manager or installing Compose as a container.
     
    +> Install a different version
    +> 
    +> The instructions below outline installation of the current stable release
    +> (**v{{site.compose_version}}**) of Compose. To install a different version of
    +> Compose, replace the given release number with the one that you want. Compose
    +> releases are also listed and available for direct download on the
    +> [Compose repository release page on GitHub](https://github.com/docker/compose/releases){:target="_blank" class="_"}.
    +> To install a **pre-release** of Compose, refer to the [install pre-release builds](#install-pre-release-builds)
    +> section.
    +
     
     
    + ### Install Compose on macOS **Docker Desktop for Mac** and **Docker Toolbox** already include Compose along @@ -45,10 +57,11 @@ Docker install instructions for these are here: * [Get Docker Desktop for Mac](/docker-for-mac/install.md) * [Get Docker Toolbox](/toolbox/overview.md) (for older systems) -
    +
    -### Install Compose on Windows systems + +### Install Compose on Windows desktop systems **Docker Desktop for Windows** and **Docker Toolbox** already include Compose along with other Docker apps, so most Windows users do not need to @@ -57,68 +70,72 @@ install Compose separately. Docker install instructions for these are here: * [Get Docker Desktop for Windows](/docker-for-windows/install.md) * [Get Docker Toolbox](/toolbox/overview.md) (for older systems) -**If you are running the Docker daemon and client directly on Microsoft -Windows Server 2016** (with [Docker EE for Windows Server 2016](/install/windows/docker-ee.md), you _do_ need to install -Docker Compose. To do so, follow these steps: +If you are running the Docker daemon and client directly on Microsoft +Windows Server, follow the instructions in the Windows Server tab. + +
    +
    + +### Install Compose on Windows Server + +Follow these instructions if you are running the Docker daemon and client directly +on Microsoft Windows Server with [Docker Engine - Enterprise](/install/windows/docker-ee.md), +and want to install Docker Compose. + 1. Start an "elevated" PowerShell (run it as administrator). Search for PowerShell, right-click, and choose **Run as administrator**. When asked if you want to allow this app to make changes to your device, click **Yes**. - In Powershell, since Github now requires TLS1.2, run the following: +2. In PowerShell, since GitHub now requires TLS1.2, run the following: - ```none + ```powershell [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 ``` - Then run the following command to download - Docker Compose, replacing `$dockerComposeVersion` with the specific - version of Compose you want to use: + Then run the following command to download the current stable release of + Compose (v{{site.compose_version}}): - ```none - Invoke-WebRequest "https://github.com/docker/compose/releases/download/$dockerComposeVersion/docker-compose-Windows-x86_64.exe" -UseBasicParsing -OutFile $Env:ProgramFiles\Docker\Docker\resources\bin\docker-compose.exe + ```powershell + Invoke-WebRequest "https://github.com/docker/compose/releases/download/{{site.compose_version}}/docker-compose-Windows-x86_64.exe" -UseBasicParsing -OutFile $Env:ProgramFiles\Docker\docker-compose.exe ``` - For example, to download Compose version {{site.compose_version}}, - the command is: +**Note**: On Windows Server 2019, you can add the Compose executable to `$Env:ProgramFiles\Docker`. Because this directory is registered in the system `PATH`, you can run the `docker-compose --version` command on the subsequent step with no additional configuration. 
- ```none - Invoke-WebRequest "https://github.com/docker/compose/releases/download/{{site.compose_version}}/docker-compose-Windows-x86_64.exe" -UseBasicParsing -OutFile $Env:ProgramFiles\Docker\Docker\resources\bin\docker-compose.exe + > To install a different version of Compose, substitute `{{site.compose_version}}` + > with the version of Compose you want to use. + +3. Test the installation. + + ```powershell + docker-compose --version + + docker-compose version {{site.compose_version}}, build 01110ad01 ``` - > Use the latest Compose release number in the download command. - > - > As already mentioned, the above command is an _example_, and - it may become out-of-date once in a while. Always follow the - command pattern shown above it. If you cut-and-paste an example, - check which release it specifies and, if needed, - replace `$dockerComposeVersion` with the release number that - you want. Compose releases are also available for direct download - on the [Compose repository release page on GitHub](https://github.com/docker/compose/releases){:target="_blank" class="_"}. - {: .important} -2. Run the executable to install Compose. -
    + ### Install Compose on Linux systems -On **Linux**, you can download the Docker Compose binary from the [Compose +On Linux, you can download the Docker Compose binary from the [Compose repository release page on GitHub](https://github.com/docker/compose/releases){: target="_blank" class="_"}. Follow the instructions from the link, which involve -running the `curl` command in your terminal to download the binaries. These step -by step instructions are also included below. +running the `curl` command in your terminal to download the binaries. These step-by-step instructions are also included below. -1. Run this command to download the latest version of Docker Compose: +> For `alpine`, the following dependency packages are needed: +> `py-pip`, `python-dev`, `libffi-dev`, `openssl-dev`, `gcc`, `libc-dev`, and `make`. +{: .important} + +1. Run this command to download the current stable release of Docker Compose: ```bash sudo curl -L "https://github.com/docker/compose/releases/download/{{site.compose_version}}/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose ``` - > Use the latest Compose release number in the download command. - > - The above command is an _example_, and it may become out-of-date. To ensure you have the latest version, check the [Compose repository release page on GitHub](https://github.com/docker/compose/releases){: target="_blank" class="_"}. - {: .important} + > To install a different version of Compose, substitute `{{site.compose_version}}` + > with the version of Compose you want to use. If you have problems installing with `curl`, see [Alternative Install Options](install.md#alternative-install-options) tab above. @@ -147,9 +164,9 @@ sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose $ docker-compose --version docker-compose version {{site.compose_version}}, build 1110ad01 ``` -
    + ### Alternative install options - [Install using pip](#install-using-pip) @@ -157,6 +174,10 @@ sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose #### Install using pip +> For `alpine`, the following dependency packages are needed: +> `py-pip`, `python-dev`, `libffi-dev`, `openssl-dev`, `gcc`, `libc-dev`, and `make`. +{: .important} + Compose can be installed from [pypi](https://pypi.python.org/pypi/docker-compose) using `pip`. If you install using `pip`, we recommend that you use a @@ -180,34 +201,31 @@ sudo pip install docker-compose #### Install as a container Compose can also be run inside a container, from a small bash script wrapper. To -install compose as a container run this command. Be sure to replace the version -number with the one that you want, if this example is out-of-date: +install compose as a container run this command: ```bash $ sudo curl -L --fail https://github.com/docker/compose/releases/download/{{site.compose_version}}/run.sh -o /usr/local/bin/docker-compose $ sudo chmod +x /usr/local/bin/docker-compose ``` -> Use the latest Compose release number in the download command. -> -The above command is an _example_, and it may become out-of-date once in a -while. Check which release it specifies and, if needed, replace the given -release number with the one that you want. Compose releases are also listed and -available for direct download on the [Compose repository release page on -GitHub](https://github.com/docker/compose/releases){: target="_blank" -class="_"}. +
    +
    +
+
+----
+
+## Install pre-release builds
+
+If you're interested in trying out a pre-release build, you can download release
+candidates from the [Compose repository release page on GitHub](https://github.com/docker/compose/releases){: target="_blank" class="_"}.
+Follow the instructions from the link, which involve running the `curl` command
+in your terminal to download the binaries.
+
+Pre-releases built from the "master" branch are also available for download at
+[https://dl.bintray.com/docker-compose/master/](https://dl.bintray.com/docker-compose/master/){: target="_blank" class="_"}.
+
+> Pre-release builds allow you to try out new features before they are released,
+> but may be less stable.
{: .important}
-
    -
    - - -## Master builds - -If you're interested in trying out a pre-release build, you can download a binary -from -[https://dl.bintray.com/docker-compose/master/](https://dl.bintray.com/docker-compose/master/). -Pre-release builds allow you to try out new features before they are released, -but may be less stable. ## Upgrading @@ -218,7 +236,7 @@ version 1.3, Compose uses Docker labels to keep track of containers, and your containers need to be recreated to add the labels. If Compose detects containers that were created without labels, it refuses -to run so that you don't end up with two sets of them. If you want to keep using +to run, so that you don't end up with two sets of them. If you want to keep using your existing containers (for example, because they have data volumes you want to preserve), you can use Compose 1.5.x to migrate them with the following command: diff --git a/compose/reference/build.md b/compose/reference/build.md index 0bac60a39d..8b229b788e 100644 --- a/compose/reference/build.md +++ b/compose/reference/build.md @@ -1,5 +1,5 @@ --- -description: docker-compose build +description: Build or rebuild services. keywords: fig, composition, compose, docker, orchestration, cli, build title: docker-compose build notoc: true diff --git a/compose/reference/config.md b/compose/reference/config.md index 56542663ab..f7bedd3426 100644 --- a/compose/reference/config.md +++ b/compose/reference/config.md @@ -5,12 +5,12 @@ title: docker-compose config notoc: true --- -```: +``` Usage: config [options] Options: --resolve-image-digests Pin image tags to digests. - -q, --quiet Only validate the configuration – do not print anything. + -q, --quiet Only validate the configuration, don't print anything. --services Print the service names, one per line. --volumes Print the volume names, one per line. --hash="*" Print the service config hash, one per line. 
diff --git a/compose/reference/events.md b/compose/reference/events.md index 1b5f1e420b..6d8106fb49 100644 --- a/compose/reference/events.md +++ b/compose/reference/events.md @@ -17,7 +17,7 @@ Stream container events for every container in the project. With the `--json` flag, a json object is printed one per line with the format: -``` +```json { "time": "2015-11-20T18:01:03.615550", "type": "container", @@ -26,7 +26,7 @@ format: "service": "web", "attributes": { "name": "application_web_1", - "image": "alpine:edge", + "image": "alpine:edge" } } ``` diff --git a/compose/reference/images.md b/compose/reference/images.md index b560ea9944..ee6e095c42 100644 --- a/compose/reference/images.md +++ b/compose/reference/images.md @@ -9,7 +9,7 @@ notoc: true Usage: images [options] [SERVICE...] Options: --q Only display image IDs + -q, --quiet Only display IDs ``` List images used by the created containers. diff --git a/compose/reference/kill.md b/compose/reference/kill.md index a72873d8b0..ecf06b5677 100644 --- a/compose/reference/kill.md +++ b/compose/reference/kill.md @@ -9,7 +9,8 @@ notoc: true Usage: kill [options] [SERVICE...] Options: --s SIGNAL SIGNAL to send to the container. Default signal is SIGKILL. + -s SIGNAL SIGNAL to send to the container. + Default signal is SIGKILL. ``` Forces running containers to stop by sending a `SIGKILL` signal. Optionally the diff --git a/compose/reference/logs.md b/compose/reference/logs.md index f74c123f5b..c54e1e9fb9 100644 --- a/compose/reference/logs.md +++ b/compose/reference/logs.md @@ -9,11 +9,11 @@ notoc: true Usage: logs [options] [SERVICE...] Options: ---no-color Produce monochrome output. --f, --follow Follow log output --t, --timestamps Show timestamps ---tail="all" Number of lines to show from the end of the logs - for each container. + --no-color Produce monochrome output. + -f, --follow Follow log output. + -t, --timestamps Show timestamps. 
+ --tail="all" Number of lines to show from the end of the logs + for each container. ``` Displays log output from services. diff --git a/compose/reference/port.md b/compose/reference/port.md index 1892210b0a..38aaf38f56 100644 --- a/compose/reference/port.md +++ b/compose/reference/port.md @@ -9,9 +9,9 @@ notoc: true Usage: port [options] SERVICE PRIVATE_PORT Options: ---protocol=proto tcp or udp [default: tcp] ---index=index index of the container if there are multiple - instances of a service [default: 1] + --protocol=proto tcp or udp [default: tcp] + --index=index index of the container if there are multiple + instances of a service [default: 1] ``` Prints the public port for a port binding. diff --git a/compose/reference/ps.md b/compose/reference/ps.md index 302f961603..0dcca28466 100644 --- a/compose/reference/ps.md +++ b/compose/reference/ps.md @@ -9,7 +9,10 @@ notoc: true Usage: ps [options] [SERVICE...] Options: --q Only display IDs + -q, --quiet Only display IDs + --services Display services + --filter KEY=VAL Filter services by a property + -a, --all Show all stopped containers (including those created by the run command) ``` Lists containers. diff --git a/compose/reference/pull.md b/compose/reference/pull.md index 2fb5329277..d86d813a82 100644 --- a/compose/reference/pull.md +++ b/compose/reference/pull.md @@ -20,7 +20,7 @@ Pulls an image associated with a service defined in a `docker-compose.yml` or `d For example, suppose you have this `docker-compose.yml` file from the [Quickstart: Compose and Rails](/compose/rails.md) sample. -``` +```yaml version: '2' services: db: @@ -38,7 +38,7 @@ services: If you run `docker-compose pull ServiceName` in the same directory as the `docker-compose.yml` file that defines the service, Docker pulls the associated image. For example, to call the `postgres` image configured as the `db` service in our example, you would run `docker-compose pull db`. -``` +```bash $ docker-compose pull db Pulling db (postgres:latest)... 
latest: Pulling from library/postgres diff --git a/compose/reference/restart.md b/compose/reference/restart.md index e30094ba7b..8b361d4d3b 100644 --- a/compose/reference/restart.md +++ b/compose/reference/restart.md @@ -9,7 +9,8 @@ notoc: true Usage: restart [options] [SERVICE...] Options: --t, --timeout TIMEOUT Specify a shutdown timeout in seconds. (default: 10) + -t, --timeout TIMEOUT Specify a shutdown timeout in seconds. + (default: 10) ``` Restarts all stopped and running services. diff --git a/compose/reference/scale.md b/compose/reference/scale.md index bf553f872f..2b71bde9e5 100644 --- a/compose/reference/scale.md +++ b/compose/reference/scale.md @@ -11,7 +11,11 @@ notoc: true of `up` command. ``` -Usage: scale [SERVICE=NUM...] +Usage: scale [options] [SERVICE=NUM...] + +Options: + -t, --timeout TIMEOUT Specify a shutdown timeout in seconds. + (default: 10) ``` Sets the number of containers to run for a service. diff --git a/compose/reference/stop.md b/compose/reference/stop.md index 2da3ff2669..82e0a7a76b 100644 --- a/compose/reference/stop.md +++ b/compose/reference/stop.md @@ -9,7 +9,8 @@ notoc: true Usage: stop [options] [SERVICE...] Options: --t, --timeout TIMEOUT Specify a shutdown timeout in seconds (default: 10). + -t, --timeout TIMEOUT Specify a shutdown timeout in seconds. + (default: 10) ``` Stops running containers without removing them. They can be started again with diff --git a/config/containers/live-restore.md b/config/containers/live-restore.md index eaa2c2310d..f37a285b68 100644 --- a/config/containers/live-restore.md +++ b/config/containers/live-restore.md @@ -34,7 +34,7 @@ when the daemon becomes unavailable. **Only do one of the following**. ``` - Restart the Docker daemon. On Linux, you can avoid a restart (and avoid any - downtime for your containers) by reload the Docker daemon. If you use + downtime for your containers) by reloading the Docker daemon. If you use `systemd`, then use the command `systemctl reload docker`. 
Otherwise, send a `SIGHUP` signal to the `dockerd` process. diff --git a/config/containers/logging/awslogs.md b/config/containers/logging/awslogs.md index 282f4c71cf..f46b92359c 100644 --- a/config/containers/logging/awslogs.md +++ b/config/containers/logging/awslogs.md @@ -40,6 +40,16 @@ You can set the logging driver for a specific container by using the docker run --log-driver=awslogs ... +If you are using Docker Compose, set `awslogs` using the following declaration example: + +```yaml +myservice: + logging: + driver: awslogs + options: + awslogs-region: us-east-1 +``` + ## Amazon CloudWatch Logs options You can add logging options to the `daemon.json` to set Docker-wide defaults, diff --git a/config/containers/logging/journald.md b/config/containers/logging/journald.md index ecd850d63f..698f53093b 100644 --- a/config/containers/logging/journald.md +++ b/config/containers/logging/journald.md @@ -56,7 +56,7 @@ driver options. | Option | Required | Description | |:------------|:---------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `tag` | optional | Specify template to set `CONTAINER_TAG` and `SYSLOG_IDENTIFIER` value in journald logs. Refer to [log tag option documentation](/engine/admin/logging/log_tags/) to customize the log tag format | +| `tag` | optional | Specify template to set `CONTAINER_TAG` and `SYSLOG_IDENTIFIER` value in journald logs. Refer to [log tag option documentation](/engine/admin/logging/log_tags/) to customize the log tag format. | | `labels` | optional | Comma-separated list of keys of labels, which should be included in message, if these labels are specified for the container. | | `env` | optional | Comma-separated list of keys of environment variables, which should be included in message, if these variables are specified for the container. 
| | `env-regex` | optional | Similar to and compatible with env. A regular expression to match logging-related environment variables. Used for advanced [log tag options](/engine/admin/logging/log_tags/). | diff --git a/config/containers/logging/json-file.md b/config/containers/logging/json-file.md index 6e397885f6..913f08d305 100644 --- a/config/containers/logging/json-file.md +++ b/config/containers/logging/json-file.md @@ -13,6 +13,10 @@ and writes them in files using the JSON format. The JSON format annotates each l origin (`stdout` or `stderr`) and its timestamp. Each log file contains information about only one container. +```json +{"log":"Log line is here\n","stream":"stdout","time":"2019-01-01T11:11:11.111111111Z"} +``` + ## Usage To use the `json-file` driver as the default logging driver, set the `log-driver` diff --git a/config/containers/logging/local.md b/config/containers/logging/local.md new file mode 100644 index 0000000000..708e4a3ee8 --- /dev/null +++ b/config/containers/logging/local.md @@ -0,0 +1,55 @@ +--- +description: Describes how to use the local binary (Protobuf) logging driver. +keywords: local, protobuf, docker, logging, driver +redirect_from: +- /engine/reference/logging/local/ +- /engine/admin/logging/local/ +title: local binary file Protobuf logging driver +--- + +This `log-driver` writes to `local` binary files using Protobuf [Protocol Buffers](https://en.wikipedia.org/wiki/Protocol_Buffers) + +## Usage + +To use the `local` driver as the default logging driver, set the `log-driver` +and `log-opt` keys to appropriate values in the `daemon.json` file, which is +located in `/etc/docker/` on Linux hosts or +`C:\ProgramData\docker\config\daemon.json` on Windows Server. For more information about +configuring Docker using `daemon.json`, see +[daemon.json](/engine/reference/commandline/dockerd.md#daemon-configuration-file). + +The following example sets the log driver to `local`. 
+ +```json +{ + "log-driver": "local", + "log-opts": {} +} +``` + +> **Note**: `log-opt` configuration options in the `daemon.json` configuration +> file must be provided as strings. Boolean and numeric values (such as the value +> for `max-file` in the example above) must therefore be enclosed in quotes (`"`). + +Restart Docker for the changes to take effect for newly created containers. + +Existing containers will not use the new logging configuration. + +You can set the logging driver for a specific container by using the +`--log-driver` flag to `docker container create` or `docker run`: + +```bash +$ docker run \ + --log-driver local --log-opt compress="false" \ + alpine echo hello world +``` + +### Options + +The `local` logging driver supports the following logging options: + +| Option | Description | Example value | +|:------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------| +| `max-size` | The maximum size of each binary log file before rotation. A positive integer plus a modifier representing the unit of measure (`k`, `m`, or `g`). Defaults to `20m`. | `--log-opt max-size=10m` | +| `max-file` | The maximum number of binary log files. If rotating the logs creates an excess file, the oldest file is removed. **Only effective when `max-size` is also set.** A positive integer. Defaults to `5`. | `--log-opt max-file=5` | +| `compress` | Whether or not the binary files should be compressed. 
Defaults to `true` | `--log-opt compress=true` | diff --git a/config/thirdparty/index.md b/config/thirdparty/index.md new file mode 100644 index 0000000000..ac0c196f63 --- /dev/null +++ b/config/thirdparty/index.md @@ -0,0 +1,17 @@ +--- +description: Third-party configuration tools +keywords: third-party, tools, monitoring, configuration, usage, docker, daemon +redirect_from: +- /config/thirdparty/monitoring/ +title: Work with third-party monitoring tools +--- + +You can configure Docker to use third-party monitoring tools. This topic lists various third-party tools you can use to monitor Docker. + +* [Collect Docker metrics using Prometheus](/config/thirdparty/prometheus/) + +* [Sysdig Monitoring Solution Brief for Docker Enterprise Edition](https://success.docker.com/article/sysdig-monitoring) + +* [Datadog Monitoring Solution Brief for Docker Enterprise Edition](https://success.docker.com/article/datadog-monitoring) + + >**Note**: Docker no longer maintains the previously listed documentation about using Sysdig and Datadog from within Docker. diff --git a/cs-engine/1.12/index.md b/cs-engine/1.12/index.md deleted file mode 100644 index 44297babe2..0000000000 --- a/cs-engine/1.12/index.md +++ /dev/null @@ -1,400 +0,0 @@ ---- -description: Learn how to install the commercially supported version of Docker Engine. -keywords: docker, engine, dtr, install -title: Install CS Docker Engine -redirect_from: -- /cs-engine/1.12/install/ ---- - -Follow these instructions to install CS Docker Engine, the commercially -supported version of Docker Engine. - -CS Docker Engine can be installed on the following operating systems: - -* [CentOS 7.1/7.2 & RHEL 7.0/7.1/7.2 (YUM-based systems)](#install-on-centos-7172--rhel-707172-yum-based-systems) -* [Ubuntu 14.04 LTS](#install-on-ubuntu-1404-lts) -* [SUSE Linux Enterprise 12](#install-on-suse-linux-enterprise-123) - -You can install CS Docker Engine using a repository or using packages. 
- -- If you [use a repository](#install-using-a-repository), your operating system - will notify you when updates are available and you can upgrade or downgrade - easily, but you need an internet connection. This approach is recommended. - -- If you [use packages](#install-using-packages), you can install CS Docker - Engine on air-gapped systems that have no internet connection. However, you - are responsible for manually checking for updates and managing upgrades. - -## Prerequisites - -To install CS Docker Engine, you need root or sudo privileges and you need -access to a command line on the system. - -## Install using a repository - -### Install on CentOS 7.1/7.2 & RHEL 7.0/7.1/7.2/7.3 (YUM-based systems) - -This section explains how to install on CentOS 7.1/7.2 & RHEL 7.0/7.1/7.2/7.3. Only -these versions are supported. CentOS 7.0 is **not** supported. On RHEL, -depending on your current level of updates, you may need to reboot your server -to update its RHEL kernel. - -1. Add the Docker public key for CS Docker Engine packages: - - ```bash - $ sudo rpm --import "https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e" - ``` - - > **Note**: If the key server above does not respond, you can try one of these: - > - > - pgp.mit.edu - > - keyserver.ubuntu.com - -2. Install yum-utils if necessary: - - ```bash - $ sudo yum install -y yum-utils - ``` - -3. Add the Docker repository: - - ```bash - $ sudo yum-config-manager --add-repo https://packages.docker.com/1.12/yum/repo/main/centos/7 - ``` - - This adds the repository of the latest version of CS Docker Engine. You can - customize the URL to install an older version. - -4. Install Docker CS Engine: - - - **Latest version**: - - ```bash - $ sudo yum makecache fast - - $ sudo yum install docker-engine - ``` - - - **Specific version**: - - On production systems, you should install a specific version rather than - relying on the latest. - - 1. 
List the available versions: - - ```bash - $ yum list docker-engine.x86_64 --showduplicates |sort -r - ``` - - The second column represents the version. - - 2. Install a specific version by adding the version after `docker-engine`, - separated by a hyphen (`-`): - - ```bash - $ sudo yum install docker-engine- - ``` - -5. Configure `devicemapper`: - - By default, the `devicemapper` graph driver does not come pre-configured in - a production-ready state. Follow the documented step by step instructions to - [configure devicemapper with direct-lvm for production](../../engine/userguide/storagedriver/device-mapper-driver/#configure-direct-lvm-mode-for-production) - to achieve the best performance and reliability for your environment. - -6. Configure the Docker daemon to start automatically when the system starts, - and start it now. - - ```bash - $ sudo systemctl enable docker.service - $ sudo systemctl start docker.service - ``` - -7. Confirm the Docker daemon is running: - - ```bash - $ sudo docker info - ``` - -8. Only users with `sudo` access can run `docker` commands. - Optionally, add non-sudo access to the Docker socket by adding your user - to the `docker` group. - - ```bash - $ sudo usermod -a -G docker $USER - ``` - -9. Log out and log back in to have your new permissions take effect. - - -### Install on Ubuntu 14.04 LTS or 16.04 LTS - -1. Install packages to allow `apt` to use a repository over HTTPS: - - ```bash - $ sudo apt-get update - - $ sudo apt-get install --no-install-recommends \ - apt-transport-https \ - curl \ - software-properties-common - ``` - - Optionally, install additional kernel modules to add AUFS support. - - ```bash - $ sudo apt-get install -y --no-install-recommends \ - linux-image-extra-$(uname -r) \ - linux-image-extra-virtual - ``` - -2. 
Download and import Docker's public key for CS packages: - - ```bash - $ curl -fsSL 'https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e' | sudo apt-key add - - ``` - - >**Note**: If the key server above does not respond, you can try one of these: - > - > - pgp.mit.edu - > - keyserver.ubuntu.com - -3. Add the repository. In the command below, the `lsb_release -cs` sub-command - returns the name of your Ubuntu version, like `xenial` or `trusty`. - - ```bash - $ sudo add-apt-repository \ - "deb https://packages.docker.com/1.12/apt/repo/ \ - ubuntu-$(lsb_release -cs) \ - main" - ``` - -4. Install CS Docker Engine: - - - **Latest version**: - - ```bash - $ sudo apt-get update - - $ sudo apt-get -y install docker-engine - ``` - - - **Specific version**: - - On production systems, you should install a specific version rather than - relying on the latest. - - 1. List the available versions: - - ```bash - $ sudo apt-get update - - $ apt-cache madison docker-engine - ``` - - The second column represents the version. - - 2. Install a specific version by adding the version after `docker-engine`, - separated by an equals sign (`=`): - - ```bash - $ sudo apt-get install docker-engine= - ``` - -5. Confirm the Docker daemon is running: - - ```bash - $ sudo docker info - ``` - -6. Only users with `sudo` access can run `docker` commands. - Optionally, add non-sudo access to the Docker socket by adding your user - to the `docker` group. - - ```bash - $ sudo usermod -a -G docker $USER - ``` - - Log out and log back in to have your new permissions take effect. - -### Install on SUSE Linux Enterprise 12.3 - -1. Refresh your repository: - - ```bash - $ sudo zypper update - ``` - -2. 
Add the Docker repository and public key: - - ```bash - $ sudo zypper ar -t YUM https://packages.docker.com/1.12/yum/repo/main/opensuse/12.3 docker-1.13 - $ sudo rpm --import 'https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e' - ``` - - This adds the repository of the latest version of CS Docker Engine. You can - customize the URL to install an older version. - - **Note**: If the key server above does not respond, you can try one of these: - > - > - pgp.mit.edu - > - keyserver.ubuntu.com - -3. Install CS Docker Engine. - - - **Latest version**: - - ```bash - $ sudo zypper refresh - - $ sudo zypper install docker-engine - ``` - - - **Specific version**: - - On production systems, you should install a specific version rather than - relying on the latest. - - 1. List the available versions: - - ```bash - $ sudo zypper refresh - - $ zypper search -s --match-exact -t package docker-engine - ``` - - The third column is the version string. - - 2. Install a specific version by adding the version after `docker-engine`, - separated by a hyphen (`-`): - - ```bash - $ sudo zypper install docker-engine- - ``` - -4. Configure the Docker daemon to start automatically when the system starts, - and start it now. - - ```bash - $ sudo systemctl enable docker.service - $ sudo systemctl start docker.service - ``` - -5. Confirm the Docker daemon is running: - - ```bash - $ sudo docker info - ``` - -6. Only users with `sudo` access can run `docker` commands. - Optionally, add non-sudo access to the Docker socket by adding your user - to the `docker` group. - - ```bash - $ sudo usermod -a -G docker $USER - ``` - - Log out and log back in to have your new permissions take effect. - -7. [Configure Btrfs for graph storage](/engine/userguide/storagedriver/btrfs-driver.md). - This is the only graph storage driver supported on SLES. 
- -## Install using packages - -If you need to install Docker on an air-gapped system with no access to the -internet, use the [package download link table](#package-download-links) to -download the Docker package for your operating system, then install it using the -[appropriate command](#general-commands). You are responsible for manually -upgrading Docker when a new version is available, and also for satisfying -Docker's dependencies. - -### General commands - -To install Docker from packages, use the following commands: - -| Operating system | Command | -|-----------------------|---------| -| RHEL / CentOS / SLES | `$ sudo yum install /path/to/package.rpm` | -| Ubuntu | `$ sudo dpkg -i /path/to/package.deb` | - -### Package download links - -{% assign rpm-prefix = "https://packages.docker.com/1.12/yum/repo/main" %} -{% assign deb-prefix = "https://packages.docker.com/1.12/apt/repo/pool/main/d/docker-engine" %} - -#### CS Docker Engine 1.12.6 - -{% comment %} Check on the S3 bucket for packages.docker.com for the versions. 
{% endcomment %} -{% assign rpm-version = "1.12.6.cs8-1" %} -{% assign rpm-rhel-version = "1.12.6.cs8-1" %} -{% assign deb-version = "1.12.6~cs8-0" %} - -| Operating system | Package links | -|-----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| RHEL 7.x and CentOS 7 | [docker-engine]({{ rpm-prefix }}/centos/7/Packages/docker-engine-{{ rpm-version}}.el7.centos.x86_64.rpm), [docker-engine-debuginfo]({{ rpm-prefix }}/centos/7/Packages/docker-engine-debuginfo-{{ rpm-version }}.el7.centos.x86_64.rpm), [docker-engine-selinux]({{ rpm-prefix }}/centos/7/Packages/docker-engine-selinux-{{ rpm-version}}1.el7.centos.noarch.rpm) | -| RHEL 7.2 (only use if you have problems with `selinux` with the packages above) | [docker-engine]({{ rpm-prefix }}/rhel/7.2/Packages/docker-engine-{{ rpm-rhel-version }}.el7.centos.x86_64.rpm), [docker-engine-debuginfo]({{ rpm-prefix }}/rhel/7.2/Packages/docker-engine-debuginfo-{{ rpm-rhel-version }}.el7.centos.x86_64.rpm), [docker-engine-selinux]({{ rpm-prefix }}/rhel/7.2/Packages/docker-engine-selinux-{{ rpm-rhel-version }}.el7.centos.noarch.rpm) | -| SLES 12 | [docker-engine]({{ rpm-prefix }}/opensuse/12.3/Packages/docker-engine-{{ rpm-version }}.x86_64.rpm) | -| Ubuntu Xenial | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-xenial_amd64.deb) | -| Ubuntu Wily | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-wily_amd64.deb) | -| Ubuntu Trusty | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-trusty_amd64.deb) | -| Ubuntu Precise | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-precisel_amd64.deb) | - -#### CS Docker Engine 
1.12.5 - -{% comment %} Check on the S3 bucket for packages.docker.com for the versions. {% endcomment %} -{% assign rpm-version = "1.12.5.cs5-1" %} -{% assign deb-version = "1.12.5~cs5-0" %} - -| Operating system | Package links | -|-----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| RHEL 7.x and CentOS 7 | [docker-engine]({{ rpm-prefix }}/centos/7/Packages/docker-engine-{{ rpm-version}}.el7.centos.x86_64.rpm), [docker-engine-debuginfo]({{ rpm-prefix }}/centos/7/Packages/docker-engine-debuginfo-{{ rpm-version }}.el7.centos.x86_64.rpm), [docker-engine-selinux]({{ rpm-prefix }}/centos/7/Packages/docker-engine-selinux-{{ rpm-version}}1.el7.centos.noarch.rpm) | -| SLES 12 | [docker-engine]({{ rpm-prefix }}/opensuse/12.3/Packages/docker-engine-{{ rpm-version }}.x86_64.rpm) | -| Ubuntu Xenial | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-xenial_amd64.deb) | -| Ubuntu Wily | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-wily_amd64.deb) | -| Ubuntu Trusty | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-trusty_amd64.deb) | -| Ubuntu Precise | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-precisel_amd64.deb) | - -#### CS Docker Engine 1.12.3 - -{% comment %} Check on the S3 bucket for packages.docker.com for the versions. 
{% endcomment %} -{% assign rpm-version = "1.12.3.cs4-1" %} -{% assign deb-version = "1.12.3~cs4-0" %} - -| Operating system | Package links | -|-----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| RHEL 7.x and CentOS 7 | [docker-engine]({{ rpm-prefix }}/centos/7/Packages/docker-engine-{{ rpm-version}}.el7.centos.x86_64.rpm), [docker-engine-debuginfo]({{ rpm-prefix }}/centos/7/Packages/docker-engine-debuginfo-{{ rpm-version }}.el7.centos.x86_64.rpm), [docker-engine-selinux]({{ rpm-prefix }}/centos/7/Packages/docker-engine-selinux-{{ rpm-version}}1.el7.centos.noarch.rpm) | -| SLES 12 | [docker-engine]({{ rpm-prefix }}/opensuse/12.3/Packages/docker-engine-{{ rpm-version }}.x86_64.rpm) | -| Ubuntu Xenial | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-xenial_amd64.deb) | -| Ubuntu Wily | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-wily_amd64.deb) | -| Ubuntu Trusty | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-trusty_amd64.deb) | -| Ubuntu Precise | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-precisel_amd64.deb) | - -#### CS Docker Engine 1.12.2 - -{% comment %} Check on the S3 bucket for packages.docker.com for the versions. 
{% endcomment %} -{% assign rpm-version = "1.12.2.cs2-1" %} -{% assign deb-version = "1.12.2~cs2-0" %} - -| Operating system | Package links | -|-----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| RHEL 7.x and CentOS 7 | [docker-engine]({{ rpm-prefix }}/centos/7/Packages/docker-engine-{{ rpm-version}}.el7.centos.x86_64.rpm), [docker-engine-debuginfo]({{ rpm-prefix }}/centos/7/Packages/docker-engine-debuginfo-{{ rpm-version }}.el7.centos.x86_64.rpm), [docker-engine-selinux]({{ rpm-prefix }}/centos/7/Packages/docker-engine-selinux-{{ rpm-version}}1.el7.centos.noarch.rpm) | -| SLES 12 | [docker-engine]({{ rpm-prefix }}/opensuse/12.3/Packages/docker-engine-{{ rpm-version }}.x86_64.rpm) | -| Ubuntu Xenial | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-xenial_amd64.deb) | -| Ubuntu Wily | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-wily_amd64.deb) | -| Ubuntu Trusty | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-trusty_amd64.deb) | -| Ubuntu Precise | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-precisel_amd64.deb) | - -#### CS Docker Engine 1.12.1 - -{% comment %} Check on the S3 bucket for packages.docker.com for the versions. 
{% endcomment %} -{% assign rpm-version = "1.12.1.cs1-1" %} -{% assign deb-version = "1.12.1~cs1-0" %} - -| Operating system      | Package links | -|-----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| RHEL 7.x and CentOS 7 | [docker-engine]({{ rpm-prefix }}/centos/7/Packages/docker-engine-{{ rpm-version}}.el7.centos.x86_64.rpm), [docker-engine-debuginfo]({{ rpm-prefix }}/centos/7/Packages/docker-engine-debuginfo-{{ rpm-version }}.el7.centos.x86_64.rpm), [docker-engine-selinux]({{ rpm-prefix }}/centos/7/Packages/docker-engine-selinux-{{ rpm-version}}1.el7.centos.noarch.rpm) | -| SLES 12               | [docker-engine]({{ rpm-prefix }}/opensuse/12.3/Packages/docker-engine-{{ rpm-version }}.x86_64.rpm) | -| Ubuntu Xenial         | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-xenial_amd64.deb) | -| Ubuntu Wily           | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-wily_amd64.deb) | -| Ubuntu Trusty         | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-trusty_amd64.deb) | -| Ubuntu Precise        | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-precise_amd64.deb) | diff --git a/cs-engine/1.12/release-notes/index.md b/cs-engine/1.12/release-notes/index.md deleted file mode 100644 index b4b207f3d0..0000000000 --- a/cs-engine/1.12/release-notes/index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -description: The release notes for CS Docker Engine.
-keywords: docker, engine, release notes -title: Commercially Supported Docker Engine release notes ---- - -* [Release notes](release-notes.md) -* [Prior release notes](prior-release-notes.md) diff --git a/cs-engine/1.12/release-notes/prior-release-notes.md b/cs-engine/1.12/release-notes/prior-release-notes.md deleted file mode 100644 index 397b18f077..0000000000 --- a/cs-engine/1.12/release-notes/prior-release-notes.md +++ /dev/null @@ -1,341 +0,0 @@ ---- -description: Archived release notes for commercially supported Docker Engine -keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry, release, commercially supported Docker Engine -redirect_from: -- /docker-trusted-registry/cse-prior-release-notes/ -- /docker-trusted-registry/cs-engine/release-notes/prior-release-notes/ -- /cs-engine/release-notes/prior-release-notes/ -title: Release notes archive for Commercially Supported Docker Engine. ---- - -This document contains the previous versions of the commercially supported -Docker Engine release notes. It includes issues, fixes, and new features. - -Refer to the [detailed list](https://github.com/moby/moby/releases) of all changes since the release of CS Engine 1.10.3-cs3 - -## CS Engine 1.10.3-cs4 -(12 Jan 2017) - -Bumps RunC version to address CVE-2016-9962. - -## CS Engine 1.10.3-cs3 -(25 April 2016) - -This release addresses the following issue: - -A vulnerability in the Go standard runtime libraries allowed a maliciously crafted client certificate to be used to cause an infinite loop in a TLS server. This can lead to a Denial of Service against the Docker Engine if it is deployed such that it uses TLS client certificate authentication. This vulnerability has been fixed in this release. We consider this a low-impact issue, due to complexity of attack. Customers should consider upgrading if their deployed Docker Engines are exposed to potentially malicious network attackers. 
- -This issue is resolved by using Go runtime v1.5.4 which was released to address this vulnerability - -* https://github.com/moby/moby/pull/21977 -* https://github.com/moby/moby/pull/21987 - -## CS Engine 1.10.3-cs2 -(18 March 2016) - -Bug fix release picking up changes from Docker 1.10.3 release. - -Refer to the [detailed list](https://github.com/moby/moby/releases/tag/v1.10.3) of all changes since the release of CS Engine 1.10.2-cs1 - -## CS Engine 1.10.2-cs1 -(22 February 2016) - -In this release the CS Engine is supported on SUSE Linux Enterprise 12 OS. - -Refer to the [detailed list](https://github.com/moby/moby/releases) of all changes since the release of CS Engine 1.9.1. - -## CS Engine 1.9.1-cs3 -(6 January 2016) - -This release addresses the following issues: - -* The commercially supported Engine 1.9.1-cs3 now supports multi-host networking -for all the kernels that the base CS Engine is supported on. - ->**Note**: Centos 7 has its firewall enabled by default and it prevents the VXLAN tunnel from communicating. If this applies to you, then after installing the CS Engine, execute the following command in the Linux host: - - sudo firewall-cmd --zone=public --permanent --add-port=4789/udp - - -* Corrected an issue where Docker didn't remove the Masquerade NAT rule from `iptables` when the network was removed. This caused the gateway address to be -incorrectly propagated as the source address of a connection. - -* Fixed an issue where if the daemon started multiple containers concurrently, then the `/etc/hosts` files were incompletely populated. This issue occurred randomly. - -* Corrected an issue where the same IP address for different Docker containers resulted in network connection inconsistencies. Now each container has a separate IP address. - -* Corrected an issue where the IPv6 gateway was not created when using custom networks although the network had a configured gateway. 
- -* Fixed an issue where users might have experienced a panic error if the daemon was started with the `—cluster-store` option, but without the `—cluster-advertise` option. - -## CS Engine 1.9.1-cs2 -(4 December 2015) - -Starting with this release, upgrading minor versions, for example, from 1.9.0 to 1.9.1, is faster and easier. - -You can refer to the detailed list of all changes since the release of CS Engine -1.9.0 -https://github.com/moby/moby/releases. - -## CS Engine 1.9.0 -(12 November 2015) - -Highlighted feature summary: - -* Network Management and Plugins. Networks are now first class objects that can be listed, created, deleted, inspected, and connected to or disconnected from a -container. They can be manipulated outside of the container themselves and are -fully manageable on its own lifecycle. You can also use plugins to extend -network functionality. - -* Docker, Inc. now provides support for the in-box Overlay (for cross-host networking) and Bridge network plugins. You can find more information about how -to manage networks and using network plugins in the [documentation](/engine/userguide/networking/index.md). - -* Volume Management and Plugins. Volumes also become discrete, manageable objects in Docker. Volumes can be listed, created, deleted, and inspected. -Similar to networks, they have their own managed lifecycle outside of the -container. Plugins allow others to write and extend the functionality of volumes -or provide integration with other types of storage. - -* The in-box volume driver is included and supported. You can find more information about how to manage volumes and using volume plugins in the -documentation. - -* Docker Content Trust. Use Content Trust to both verify the integrity and the publisher of all the data received from a registry over any channel. Content Trust is currently only supported using Docker Hub notary servers. - -* Updated the release cadence of the CS Docker Engine. 
Starting with this version, Docker supports **every** major release of Docker Engine from open -source with three releases under support at one time. This means you’ll be able -to take advantage of the latest and greatest features and you won’t have to wait -for a supported release to take advantage of a specific feature. - -Refer to the [detailed list](https://github.com/moby/moby/releases) of all changes since the release of CS Engine 1.6. - -## CS Engine 1.6.2-cs7 -(12 October 2015) - -As part of our ongoing security efforts, a vulnerability was discovered that affects the way content -is stored and retrieved within the Docker Engine and CS Docker Engine. Today we -are releasing a security update that fixes this issue in both Docker Engine 1.8.3 and CS Docker Engine 1.6.2-cs7. The change log for Docker Engine 1.8.3 has a complete list of all the changes incorporated into both the open source and commercially -supported releases. - -We recommend that users upgrade to CS Docker Engine 1.6.2-cs7. If you are unable -to upgrade to CS Docker Engine 1.6.2-cs7 right away, remember to only pull -content from trusted sources. - -To keep up to date on all the latest Docker Security news, make sure you check -out our [Security page](http://www.docker.com/docker-security), subscribe to our mailing list, or find us in #docker-security. - -## CS Docker Engine 1.6.2-cs6 -(23 July 2015) - -Certifies support for CentOS 7.1. - -## CS Docker Engine 1.6.2-cs5 -(21 May 2015) - -For customers running Docker Engine on [supported versions of RedHat Enterprise Linux](https://www.docker.com/enterprise/support/) with SELinux enabled, the `docker build` and `docker run` commands will not have DNS host name resolution and bind-mounted volumes may not be accessible. 
As a result, customers with -SELinux will be unable to use hostname-based network access in either `docker build` or `docker run`, nor will they be able to `docker run` containers that use `--volume` or `-v` bind-mounts (with an incorrect SELinux label) in their environment. By installing Docker Engine 1.6.2-cs5, customers can use Docker as intended on RHEL with SELinux enabled. - -For example, you see will failures such as: - -```bash -[root@dtr ~]# docker -v -Docker version 1.6.0-cs2, build b8dd430 -[root@dtr ~]# ping dtr.home.org.au -PING dtr.home.org.au (10.10.10.104) 56(84) bytes of data. -64 bytes from dtr.home.gateway (10.10.10.104): icmp_seq=1 ttl=64 time=0.663 ms -^C ---- dtr.home.org.au ping statistics --- -2 packets transmitted, 2 received, 0% packet loss, time 1001ms -rtt min/avg/max/mdev = 0.078/0.370/0.663/0.293 ms -[root@dtr ~]# docker run --rm -it debian ping dtr.home.org.au -ping: unknown host -[root@dtr ~]# docker run --rm -it debian cat /etc/resolv.conf -cat: /etc/resolv.conf: Permission denied -[root@dtr ~]# docker run --rm -it debian apt-get update -Err http://httpredir.debian.org jessie InRelease - -Err http://security.debian.org jessie/updates InRelease - -Err http://httpredir.debian.org jessie-updates InRelease - -Err http://security.debian.org jessie/updates Release.gpg - Could not resolve 'security.debian.org' -Err http://httpredir.debian.org jessie Release.gpg - Could not resolve 'httpredir.debian.org' -Err http://httpredir.debian.org jessie-updates Release.gpg - Could not resolve 'httpredir.debian.org' -[output truncated] -``` - -or when running a `docker build`: - -```bash -[root@dtr ~]# docker build . -Sending build context to Docker daemon 11.26 kB -Sending build context to Docker daemon -Step 0 : FROM fedora - ---> e26efd418c48 -Step 1 : RUN yum install httpd - ---> Running in cf274900ea35 - -One of the configured repositories failed (Fedora 21 - x86_64), -and yum doesn't have enough cached data to continue. 
At this point the only -safe thing yum can do is fail. There are a few ways to work "fix" this: - -[output truncated] -``` - -**Affected Versions**: All previous versions of Docker Engine when SELinux is - enabled. - -Docker **highly recommends** that all customers running previous versions of Docker Engine update to this release. - -### **How to workaround this issue** - -Customers who choose not to install this update have two options. The first -option is to disable SELinux. This is *not recommended* for production systems -where SELinux is typically required. - -The second option is to pass the following parameter in to `docker run`. - - --security-opt=label:type:docker_t - -This parameter cannot be passed to the `docker build` command. - -### **Upgrade notes** - -When upgrading, make sure you stop Docker Trusted Registry first, perform the Engine upgrade, and then restart Docker Trusted Registry. - -If you are running with SELinux enabled, previous Docker Engine releases allowed -you to bind-mount additional volumes or files inside the container as follows: - - $ docker run -it -v /home/user/foo.txt:/foobar.txt:ro - -In the 1.6.2-cs5 release, you must ensure additional bind-mounts have the -correct SELinux context. For example, if you want to mount `foobar.txt` as -read-only into the container, do the following to create and test your -bind-mount: - -1. Add the `z` option to the bind mount when you specify `docker run`. - - ```bash - $ docker run -it -v /home/user/foo.txt:/foobar.txt:ro,z - ``` - -2. Exec into your new container. - - For example, if your container is `bashful_curie`, open a shell on the - container: - - ```bash - $ docker exec -it bashful_curie bash - ``` - -3. Use `cat` to check the permissions on the mounted file. - - ```bash - $ cat /foobar.txt - the contents of foobar appear - ``` - -If you see the file's contents, your mount succeeded. 
If you receive a -`Permission denied` message and/or the `/var/log/audit/audit.log` file on your -Docker host contains an AVC Denial message, the mount did not succeed. - - type=AVC msg=audit(1432145409.197:7570): avc: denied { read } for pid=21167 comm="cat" name="foobar.txt" dev="xvda2" ino=17704136 scontext=system_u:system_r:svirt_lxc_net_t:s0:c909,c965 tcontext=unconfined_u:object_r:user_home_t:s0 tclass=file - -Recheck your command line to make sure you passed in the `z` option. - - -## CS Engine 1.6.2-cs4 -(13 May 2015) - -Fix mount regression for `/sys`. - -## CS Engine 1.6.1-cs3 -(11 May 2015) - -Docker Engine version 1.6.1 has been released to address several vulnerabilities -and is immediately available for all supported platforms. Users are advised to -upgrade existing installations of the Docker Engine and use 1.6.1 for new installations. - -It should be noted that each of the vulnerabilities allowing privilege escalation -may only be exploited by a malicious Dockerfile or image. Users are advised to -run their own images and/or images built by trusted parties, such as those in -the official images library. - -Send any questions to security@docker.com. - - -### **[CVE-2015-3629](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-3629) Symlink traversal on container respawn allows local privilege escalation** - -Libcontainer version 1.6.0 introduced changes which facilitated a mount namespace -breakout upon respawn of a container. This allowed malicious images to write -files to the host system and escape containerization. - -Libcontainer and Docker Engine 1.6.1 have been released to address this -vulnerability. Users running untrusted images are encouraged to upgrade Docker Engine. - -Discovered by Tõnis Tiigi. 
- - -### **[CVE-2015-3627](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-3627) Insecure opening of file-descriptor 1 leading to privilege escalation** - -The file-descriptor passed by libcontainer to the pid-1 process of a container -has been found to be opened prior to performing the chroot, allowing insecure -open and symlink traversal. This allows malicious container images to trigger -a local privilege escalation. - -Libcontainer and Docker Engine 1.6.1 have been released to address this -vulnerability. Users running untrusted images are encouraged to upgrade -Docker Engine. - -Discovered by Tõnis Tiigi. - -### **[CVE-2015-3630](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-3630) Read/write proc paths allow host modification & information disclosure** - -Several paths underneath /proc were writable from containers, allowing global -system manipulation and configuration. These paths included `/proc/asound`, -`/proc/timer_stats`, `/proc/latency_stats`, and `/proc/fs`. - -By allowing writes to `/proc/fs`, it has been noted that CIFS volumes could be -forced into a protocol downgrade attack by a root user operating inside of a -container. Machines having loaded the timer_stats module were vulnerable to -having this mechanism enabled and consumed by a container. - -We are releasing Docker Engine 1.6.1 to address this vulnerability. All -versions up to 1.6.1 are believed vulnerable. Users running untrusted -images are encouraged to upgrade. - -Discovered by Eric Windisch of the Docker Security Team. - -### **[CVE-2015-3631](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-3631) Volume mounts allow LSM profile escalation** - -By allowing volumes to override files of `/proc` within a mount namespace, a user -could specify arbitrary policies for Linux Security Modules, including setting -an unconfined policy underneath AppArmor, or a `docker_t` policy for processes -managed by SELinux. 
In all versions of Docker up until 1.6.1, it is possible for -malicious images to configure volume mounts such that files of proc may be overridden. - -We are releasing Docker Engine 1.6.1 to address this vulnerability. All versions -up to 1.6.1 are believed vulnerable. Users running untrusted images are encouraged -to upgrade. - -Discovered by Eric Windisch of the Docker Security Team. - -### **AppArmor policy improvements** - -The 1.6.1 release also marks preventative additions to the AppArmor policy. -Recently, several CVEs against the kernel have been reported whereby mount -namespaces could be circumvented through the use of the `sys_mount` syscall from -inside of an unprivileged Docker container. In all reported cases, the -AppArmor policy included in libcontainer and shipped with Docker has been -sufficient to deflect these attacks. However, we have deemed it prudent to -proactively tighten the policy further by outright denying the use of the -`sys_mount` syscall. - -Because this addition is preventative, no CVE-ID is requested. - -## CS Engine 1.6.0-cs2 -(23 Apr 2015) - -First release, see the [Docker Engine 1.6.0 Release notes](/v1.6/release-notes/) - for more details. 
diff --git a/cs-engine/1.12/release-notes/release-notes.md b/cs-engine/1.12/release-notes/release-notes.md deleted file mode 100644 index 88e7270210..0000000000 --- a/cs-engine/1.12/release-notes/release-notes.md +++ /dev/null @@ -1,221 +0,0 @@ ---- -description: Commercially supported Docker Engine release notes -keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry, Commercially Supported Docker Engine, release notes -redirect_from: -- /docker-trusted-registry/cse-release-notes/ -- /docker-trusted-registry/cs-engine/release-notes/release-notes/ -- /cs-engine/release-notes/release-notes/ -title: Commercially Supported Engine release notes ---- - -This document describes the latest changes, additions, known issues, and fixes -for the commercially supported Docker Engine (CS Engine). - -The CS Engine is functionally equivalent to the corresponding Docker Engine that -it references. However, a commercially supported release also includes -back-ported fixes (security-related and priority defects) from the open source. -It incorporates defect fixes that you can use in environments where new features -cannot be adopted as quickly for consistency and compatibility reasons. - -## Prior versions - -These notes refer to the current and immediately prior releases of the -CS Engine. For notes on older versions, see the [CS Engine prior release notes archive](prior-release-notes.md). - -## CS Engine 1.12.6-cs13 -(28 Jul 2017) - -* Fix packaging issue where packages were missing a `containerd` patch. -  This resolves an issue with a deadlock in containerd related to healthchecks. -* Fix a deadlock on cancelling healthchecks. [#28462](https://github.com/moby/moby/pull/28462) - -## CS Engine 1.12.6-cs12 -(01 Jun 2017) - -* Fix an issue where if a volume using the local volume driver which has -mount options fails to unmount on container exit, the data in the mount may be -lost if the user attempts to subsequently remove the volume.
[#32327](https://github.com/docker/docker/pulls/32327) - -## CS Engine 1.12.6-cs11 -(11 May 2017) - -* Fix an issue with overlay networks L2 miss notifications not being handled in -some cases [#1642](https://github.com/docker/libnetwork/pull/1642) - -## CS Engine 1.12.6-cs10 -(6 Mar 2017) - -* Fix concurrency issue in libnetwork - -## CS Engine 1.12.6-cs9 -(28 Feb 2017) - -* Fixes an issue causing containerd to deadlock [#336](https://github.com/docker/containerd/pull/336) -* Fixes an issue where encrypted overlay networks stop working [#30727](https://github.com/docker/docker/issues/30727) - -## CS Engine 1.12.6-cs8 -(8 Feb 2017) - -This release addresses the following issues: - -* Addresses performance issues introduced by external KV-Store access with the - `docker network ls` endpoint with large amounts of overlay networks and containers - attached to those networks - -* Addresses an inconsistent mac -> vtep binding issue when using overlay networks - -* Adds a new repository for RHEL 7.2 users, to deal with issues - users have encountered when installing the docker-engine-selinux package - on systems pinned to 7.2 packages that are older than those available in the - normal 7.2 install. This change relates to packaging changes for - [1.12.6-cs7](#cs-engine-1126-cs7). - - Users experiencing issues installing the selinux package should switch to this - repository. See [install instructions](/cs-engine/install.md) for more details. - Only switch to this repository if you encounter problems installing the - selinux packages from the centos/7 repo. - -## CS Engine 1.12.6-cs7 -(24 Jan 2017) - -This release addresses the following issues: - -* [#28406](https://github.com/docker/docker/issues/28406) Fix conflicts introduced -by the updated `selinux-policy` base package from RHEL/CentOS 7.3 -* [#26639](https://github.com/docker/docker/issues/26639) Resolves hostnames passed -to the local volume driver for nfs mount options. 
-* [#26111](https://github.com/docker/docker/issues/26111) Fix issue with adding -iptables rules due to xtables lock message change. - -## CS Engine 1.12.6-cs6 -(10 Jan 2017) - -Bumps RunC version to address CVE-2016-9962. - -Refer to the [detailed list](https://github.com/docker/docker/releases/tag/v1.12.6) of all -changes since the release of CS Engine 1.12.5-cs5. - -## CS Engine 1.12.5-cs5 -(21 Dec 2016) - -Refer to the [detailed list](https://github.com/docker/docker/releases/tag/v1.12.5) of all -changes since the release of CS Engine 1.12.3-cs4. - -## CS Engine 1.12.3-cs4 -(11 Nov 2016) - -This release addresses the following issues: - -* [#27370](https://github.com/docker/docker/issues/27370) Fix `--net-alias` for -`--attachable` networks -* [#28051](https://github.com/docker/docker/issues/28051) Fix an issue removing -a `--attachable` network by ID. - -## CS Engine 1.12.3-cs3 -(27 Oct 2016) - -Refer to the [detailed list](https://github.com/docker/docker/releases) of all -changes since the release of CS Engine 1.12.2-cs2. - -## CS Engine 1.12.2-cs2 -(13 Oct 2016) - -Refer to the [detailed list](https://github.com/docker/docker/releases) of all -changes since the release of CS Engine 1.12.1-cs1. - -## CS Engine 1.12.1-cs1 -(20 Sep 2016) - -Refer to the [detailed list](https://github.com/docker/docker/releases) of all -changes since the release of CS Engine 1.11.2-cs5. - -This release addresses the following issues: - -* [#25962](https://github.com/docker/docker/pull/25962) Allow normal containers -to connect to swarm-mode overlay network -* Various bug fixes in swarm mode networking - -## CS Engine 1.11.2-cs8 -(01 Jun 2017) - -* Fix an issue where if a volume using the local volume driver which has -mount options fails to unmount on container exit, the data in the mount may be -lost if the user attempts to subsequently remove the volume.
[#32327](https://github.com/docker/docker/pulls/32327) - -## CS Engine 1.11.2-cs7 -(24 Jan 2017) - -This release addresses the following issues: - -* [#26639](https://github.com/docker/docker/issues/26639) Resolves hostnames passed -to the local volume driver for nfs mount options. -* [#26111](https://github.com/docker/docker/issues/26111) Fix issue with adding -iptables rules due to xtables lock message change. -* [#1572](https://github.com/docker/libnetwork/issues/1572) Fix daemon panic -* [#1130](https://github.com/docker/libnetwork/pull/1130) Fix IPAM out of sync -issue on ungraceful shutdown. - -## CS Engine 1.11.2-cs6 -(12 Jan 2017) - -Bumps RunC version to address CVE-2016-9962. - -## CS Engine 1.11.2-cs5 -(13 Sep 2016) - -This release addresses the following issues: - -* Make the docker daemon ignore the `SIGPIPE` signal -[#19728](https://github.com/docker/docker/issues/19728) -* Fix race in libdevicemapper symlink handling -[#24671](https://github.com/docker/docker/issues/24671) -* Generate additional logging when unmarshalling devicemapper metadata -[#23974](https://github.com/docker/docker/pull/23974) -* Drop queries in root domain when ndots is set -[#1441](https://github.com/docker/libnetwork/pull/1441) - -## CS Engine 1.11.2-cs4 -(16 Aug 2016) - -This release addresses the following issues: - -* Change systemd kill mode to `process` so systemd only stops the docker daemon -[#21933](https://github.com/docker/docker/issues/21933) -* Fix dropped external DNS responses when greater than 512 bytes -[#1373](https://github.com/docker/libnetwork/pull/1373) -* Remove UDP connection caching in embedded DNS server -[#1352](https://github.com/docker/libnetwork/pull/1352) -* Fix issue where truncated DNS replies were discarded by the embedded DNS server -[#1351](https://github.com/docker/libnetwork/pull/1351) - -## CS Engine 1.11.2-cs3 -(7 Jun 2016) - -This release addresses the following issues: - -* Fix potential panic when running `docker build` 
-[#23032](https://github.com/docker/docker/pull/23032) -* Fix interpretation of `--user` parameter -[#22998](https://github.com/docker/docker/pull/22998) -* Fix a bug preventing container statistics from being correctly reported -[#22955](https://github.com/docker/docker/pull/22955) -* Fix an issue preventing containers from being restarted after daemon restart -[#22947](https://github.com/docker/docker/pull/22947) -* Fix a possible deadlock on image deletion and container attach -[#22918](https://github.com/docker/docker/pull/22918) -* Fix an issue causing `docker ps` to hang when using devicemapper -[#22168](https://github.com/docker/docker/pull/22168) -* Fix a bug preventing to `docker exec` into a container when using -devicemapper [#22168](https://github.com/docker/docker/pull/22168) - -## CS Engine 1.11.1-cs2 -(17 May 2016) - -This release fixes the following issue which prevented DTR containers to be automatically restarted on a docker daemon restart: - -https://github.com/docker/docker/issues/22486 - -## CS Engine 1.11.1-cs1 -(27 April 2016) - -In this release the CS Engine is supported on RHEL 7.2 OS diff --git a/cs-engine/1.12/upgrade.md b/cs-engine/1.12/upgrade.md deleted file mode 100644 index 8e80bdae63..0000000000 --- a/cs-engine/1.12/upgrade.md +++ /dev/null @@ -1,274 +0,0 @@ ---- -description: Learn how to install the commercially supported version of Docker Engine. -keywords: docker, engine, dtr, upgrade -redirect_from: -- /docker-trusted-registry/cs-engine/upgrade/ -- /cs-engine/upgrade/ -title: Upgrade Commercially Supported Docker Engine ---- - -This article explains how to upgrade your CS Docker Engine. 
- -The upgrade process depends on the version that is currently installed and the -version that you want to upgrade to: - -* [Upgrade from the same minor version](upgrade.md#upgrade-from-the-same-minor-version) -* [Upgrade from the same major version](upgrade.md#upgrade-from-the-same-major-version) -* [Upgrade from a legacy version](upgrade.md#upgrade-from-a-legacy-version) - -Before starting the upgrade, make sure you stop all containers running on the -host. This ensures your containers have time for cleaning up before exiting, -thus avoiding data loss or corruption. - -## Upgrade from the same minor version - -Use these instructions if you're upgrading your CS Docker Engine within the -same minor version. As an example, from 1.1.0 to 1.1.1. - -### CentOS 7.1 & RHEL 7.0/7.1 -Use these instructions to upgrade YUM-based systems. - -1. Update your docker-engine package: - - ```bash - $ sudo yum upgrade docker-engine - ``` - -2. Check that the CS Docker Engine is running: - - ```bash - $ sudo docker info - ``` - -### Ubuntu 14.04 LTS -Use these instructions to upgrade APT-based systems. - -1. Update your docker-engine package: - - ```bash - $ sudo apt-get update && sudo apt-get upgrade docker-engine - ``` - -2. Check that the CS Docker Engine is running: - - ```bash - $ sudo docker info - ``` - -### SUSE Enterprise 12.3 - -1. Update your docker-engine package: - - ```bash - $ sudo zypper upgrade docker-engine - ``` - -2. Check that the CS Docker Engine is running: - - ```bash - $ sudo docker info - ``` - - -## Upgrade from the same major version - -Use these instructions if you're upgrading your CS Docker Engine within the -same major version. As an example, from 1.1.x to 1.2.x. - - -### CentOS 7.1 & RHEL 7.0/7.1 -Use these instructions to upgrade YUM-based systems. - -1. Add the Docker Engine repository. 
- - ```bash - $ sudo yum-config-manager --add-repo https://packages.docker.com/1.12/yum/repo/main/centos/7 - ``` - - This adds the repository of the latest version of CS Docker Engine. You can - customize the URL to install other versions. - -2. Install the new package: - - ```bash - $ sudo yum update docker-engine - ``` - -3. Check that the CS Engine is running: - - ```bash - $ sudo docker info - ``` - -### Ubuntu 14.04 LTS -Use these instructions to update APT-based systems. - - -1. Add the docker engine repository. - - ```bash - $ echo "deb https://packages.docker.com/1.12/apt/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list - ``` - - This adds the repository of the latest version of CS Docker Engine for the - Ubuntu Trusty distribution. Change the "ubuntu-trusty" string to the - distribution you're using: - - * debian-jessie (Debian 8) - * debian-stretch (future release) - * debian-wheezy (Debian 7) - * ubuntu-precise (Ubuntu 12.04) - * ubuntu-trusty (Ubuntu 14.04) - * ubuntu-utopic (Ubuntu 14.10) - * ubuntu-vivid (Ubuntu 15.04) - * ubuntu-wily (Ubuntu 15.10) - -2. Update your docker-engine package. - - ```bash - $ sudo apt-get update && sudo apt-get upgrade docker-engine - ``` - -3. Check that the CS Engine is running: - - ```bash - $ sudo docker info - ``` - -#### SUSE Enterprise 12.3 - -1. Add the docker engine repository. - - ```bash - $ sudo zypper ar -t YUM https://packages.docker.com/1.12/yum/repo/main/opensuse/12.3 docker-1.12 - ``` - - This adds the repository of the latest version of CS Docker Engine. You - can customize the URL to install other versions. - -2. Install the new package: - - ```bash - $ sudo zypper update docker-engine - ``` - -3. Check that the CS Engine is running: - - ```bash - $ sudo docker info - ``` - - -## Upgrade from a legacy version - -Use these instructions if you're upgrading your CS Docker Engine from a version -prior to 1.9. 
In this case, first uninstall CS Docker Engine, and -then install the latest version. - -### CentOS 7.1 & RHEL 7.0/7.1 -Use these instructions to upgrade YUM-based systems. - -1. Remove the current CS Engine: - - ```bash - $ sudo yum remove docker-engine-cs - ``` - -2. Add the Docker public key for CS packages: - - ```bash - $ sudo rpm --import "https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e" - ``` - - **Note**: If the key server above does not respond, you can try one of these: - - pgp.mit.edu - - keyserver.ubuntu.com - -3. Install yum-utils if necessary: - - ```bash - $ sudo yum install -y yum-utils - ``` - -4. Add the repository for the new version and disable the old one. - - ```bash - $ sudo yum-config-manager --add-repo https://packages.docker.com/1.12/yum/repo/main/centos/7 - $ sudo yum-config-manager --disable 'Docker_cs*' - ``` - - This adds the repository of the latest version of CS Docker Engine. You - can customize the URL to install other versions. - -5. Install the new package: - - ```bash - $ sudo yum install docker-engine - ``` - -6. Enable the Docker daemon as a service and start it. - - ```bash - $ sudo systemctl enable docker.service - $ sudo systemctl start docker.service - ``` - -### Ubuntu 14.04 LTS -Use these instructions to update APT-based systems. - - -1. Remove the current Engine: - - ```bash - $ sudo apt-get remove docker-engine-cs - ``` - -2. Add the Docker public key for CS packages: - - ```bash - $ curl -s 'https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e' | sudo apt-key add --import - ``` - - **Note**: If the key server above does not respond, you can try one of these: - - pgp.mit.edu - - keyserver.ubuntu.com - -3. Install the HTTPS helper for apt (your system may already have it): - - ```bash - $ sudo apt-get update && sudo apt-get install apt-transport-https - ``` - -4. Install additional virtual drivers not in the parent image. 
- - ```bash - $ sudo apt-get install -y linux-image-extra-virtual - ``` - - You may need to reboot your server after updating the LTS kernel. - -5. Add the repository for the new version: - - ```bash - $ echo "deb https://packages.docker.com/1.12/apt/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list - ``` - - This adds the repository of the latest version of CS Docker Engine for the - Ubuntu Trusty distribution. Change the "ubuntu-trusty" string to the - distribution you're using: - - * debian-jessie (Debian 8) - * debian-stretch (future release) - * debian-wheezy (Debian 7) - * ubuntu-precise (Ubuntu 12.04) - * ubuntu-trusty (Ubuntu 14.04) - * ubuntu-utopic (Ubuntu 14.10) - * ubuntu-vivid (Ubuntu 15.04) - * ubuntu-wily (Ubuntu 15.10) - - -6. Install the upgraded package: - - ```bash - $ sudo apt-get upgrade docker-engine - ``` diff --git a/cs-engine/1.13/index.md b/cs-engine/1.13/index.md deleted file mode 100644 index 365eed32ba..0000000000 --- a/cs-engine/1.13/index.md +++ /dev/null @@ -1,341 +0,0 @@ ---- -title: Install CS Docker Engine -description: Learn how to install the commercially supported version of Docker Engine. -keywords: docker, engine, install -redirect_from: -- /docker-trusted-registry/install/engine-ami-launch/ -- /docker-trusted-registry/install/install-csengine/ -- /docker-trusted-registry/cs-engine/install/ -- /cs-engine/install/ ---- - -Follow these instructions to install CS Docker Engine, the commercially -supported version of Docker Engine. - -CS Docker Engine can be installed on the following operating systems: - -* [CentOS 7.1/7.2 & RHEL 7.0/7.1/7.2/7.3 (YUM-based systems)](#install-on-centos-7172--rhel-70717273-yum-based-systems) -* [Ubuntu 14.04 LTS or 16.04 LTS](#install-on-ubuntu-1404-lts-or-1604-lts) -* [SUSE Linux Enterprise 12](#install-on-suse-linux-enterprise-123) - -You can install CS Docker Engine using a repository or using packages. 
- -- If you [use a repository](#install-using-a-repository), your operating system - will notify you when updates are available and you can upgrade or downgrade - easily, but you need an internet connection. This approach is recommended. - -- If you [use packages](#install-using-packages), you can install CS Docker - Engine on air-gapped systems that have no internet connection. However, you - are responsible for manually checking for updates and managing upgrades. - -## Prerequisites - -To install CS Docker Engine, you need root or sudo privileges and you need -access to a command line on the system. - -## Install using a repository - -### Install on CentOS 7.1/7.2 & RHEL 7.0/7.1/7.2/7.3 (YUM-based systems) - -This section explains how to install on CentOS 7.1/7.2 & RHEL 7.0/7.1/7.2/7.3. Only -these versions are supported. CentOS 7.0 is **not** supported. On RHEL, -depending on your current level of updates, you may need to reboot your server -to update its RHEL kernel. - -1. Add the Docker public key for CS Docker Engine packages: - - ```bash - $ sudo rpm --import "https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e" - ``` - - > **Note**: If the key server above does not respond, you can try one of these: - > - > - pgp.mit.edu - > - keyserver.ubuntu.com - -2. Install yum-utils if necessary: - - ```bash - $ sudo yum install -y yum-utils - ``` - -3. Add the Docker repository: - - ```bash - $ sudo yum-config-manager --add-repo https://packages.docker.com/1.13/yum/repo/main/centos/7 - ``` - - This adds the repository of the latest version of CS Docker Engine. You can - customize the URL to install an older version. - -4. Install Docker CS Engine: - - - **Latest version**: - - ```bash - $ sudo yum makecache fast - - $ sudo yum install docker-engine - ``` - - - **Specific version**: - - On production systems, you should install a specific version rather than - relying on the latest. - - 1. 
List the available versions: - - ```bash - $ yum list docker-engine.x86_64 --showduplicates |sort -r - ``` - - The second column represents the version. - - 2. Install a specific version by adding the version after `docker-engine`, - separated by a hyphen (`-`): - - ```bash - $ sudo yum install docker-engine- - ``` - -5. Configure `devicemapper`: - - By default, the `devicemapper` graph driver does not come pre-configured in - a production-ready state. Follow the documented step by step instructions to - [configure devicemapper with direct-lvm for production](/engine/userguide/storagedriver/device-mapper-driver/#configure-direct-lvm-mode-for-production) - to achieve the best performance and reliability for your environment. - -6. Configure the Docker daemon to start automatically when the system starts, - and start it now. - - ```bash - $ sudo systemctl enable docker.service - $ sudo systemctl start docker.service - ``` - -7. Confirm the Docker daemon is running: - - ```bash - $ sudo docker info - ``` - -8. Only users with `sudo` access can run `docker` commands. - Optionally, add non-sudo access to the Docker socket by adding your user - to the `docker` group. - - ```bash - $ sudo usermod -a -G docker $USER - ``` - -9. Log out and log back in to have your new permissions take effect. - -### Install on Ubuntu 14.04 LTS or 16.04 LTS - -1. Install packages to allow `apt` to use a repository over HTTPS: - - ```bash - $ sudo apt-get update - - $ sudo apt-get install --no-install-recommends \ - apt-transport-https \ - curl \ - software-properties-common - ``` - - Optionally, install additional kernel modules to add AUFS support. - - ```bash - $ sudo apt-get install -y --no-install-recommends \ - linux-image-extra-$(uname -r) \ - linux-image-extra-virtual - ``` - -2. 
Download and import Docker's public key for CS packages: - - ```bash - $ curl -fsSL 'https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e' | sudo apt-key add - - ``` - - > **Note**: If the key server above does not respond, you can try one of these: - > - > - pgp.mit.edu - > - keyserver.ubuntu.com - -3. Add the repository. In the command below, the `lsb_release -cs` sub-command - returns the name of your Ubuntu version, like `xenial` or `trusty`. - - ```bash - $ sudo add-apt-repository \ - "deb https://packages.docker.com/1.13/apt/repo/ \ - ubuntu-$(lsb_release -cs) \ - main" - ``` - -4. Install CS Docker Engine: - - - **Latest version**: - - ```bash - $ sudo apt-get update - - $ sudo apt-get -y install docker-engine - ``` - - - **Specific version**: - - On production systems, you should install a specific version rather than - relying on the latest. - - 1. List the available versions: - - ```bash - $ sudo apt-get update - - $ apt-cache madison docker-engine - ``` - - The second column represents the version. - - 2. Install a specific version by adding the version after `docker-engine`, - separated by an equals sign (`=`): - - ```bash - $ sudo apt-get install docker-engine= - ``` - -5. Confirm the Docker daemon is running: - - ```bash - $ sudo docker info - ``` - -6. Only users with `sudo` access can run `docker` commands. - Optionally, add non-sudo access to the Docker socket by adding your user - to the `docker` group. - - ```bash - $ sudo usermod -a -G docker $USER - ``` - - Log out and log back in to have your new permissions take effect. - -### Install on SUSE Linux Enterprise 12.3 - -1. Refresh your repository: - - ```bash - $ sudo zypper update - ``` - -2. 
Add the Docker repository and public key: - - ```bash - $ sudo zypper ar -t YUM https://packages.docker.com/1.13/yum/repo/main/opensuse/12.3 docker-1.13 - $ sudo rpm --import 'https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e' - ``` - - This adds the repository of the latest version of CS Docker Engine. You can - customize the URL to install an older version. - - > **Note**: If the key server above does not respond, you can try one of these: - > - > - pgp.mit.edu - > - keyserver.ubuntu.com - -3. Install CS Docker Engine. - - - **Latest version**: - - ```bash - $ sudo zypper refresh - - $ sudo zypper install docker-engine - ``` - - - **Specific version**: - - On production systems, you should install a specific version rather than - relying on the latest. - - 1. List the available versions: - - ```bash - $ sudo zypper refresh - - $ zypper search -s --match-exact -t package docker-engine - ``` - - The third column is the version string. - - 2. Install a specific version by adding the version after `docker-engine`, - separated by a hyphen (`-`): - - ```bash - $ sudo zypper install docker-engine- - ``` - -4. Configure the Docker daemon to start automatically when the system starts, - and start it now. - - ```bash - $ sudo systemctl enable docker.service - $ sudo systemctl start docker.service - ``` - -5. Confirm the Docker daemon is running: - - ```bash - $ sudo docker info - ``` - -6. Only users with `sudo` access can run `docker` commands. - Optionally, add non-sudo access to the Docker socket by adding your user - to the `docker` group. - - ```bash - $ sudo usermod -a -G docker $USER - ``` - - Log out and log back in to have your new permissions take effect. - -7. [Configure Btrfs for graph storage](/engine/userguide/storagedriver/btrfs-driver.md). - This is the only graph storage driver supported on SLES. 
- -## Install using packages - -If you need to install Docker on an air-gapped system with no access to the -internet, use the [package download link table](#package-download-links) to -download the Docker package for your operating system, then install it using the -[appropriate command](#general-commands). You are responsible for manually -upgrading Docker when a new version is available, and also for satisfying -Docker's dependencies. - -### General commands - -To install Docker from packages, use the following commands: - -| Operating system | Command | -|-----------------------|---------| -| RHEL / CentOS / SLES | `$ sudo yum install /path/to/package.rpm` | -| Ubuntu | `$ sudo dpkg -i /path/to/package.deb` | - -### Package download links - -{% assign rpm-prefix = "https://packages.docker.com/1.13/yum/repo/main" %} -{% assign deb-prefix = "https://packages.docker.com/1.13/apt/repo/pool/main/d/docker-engine" %} - -#### CS Docker Engine 1.13.1 - -{% comment %} Check on the S3 bucket for packages.docker.com for the versions. 
{% endcomment %}
-{% assign rpm-version = "1.13.1.cs8-1" %}
-{% assign rpm-rhel-version = "1.13.1.cs8-1" %}
-{% assign deb-version = "1.13.1~cs8-0" %}
-
-| Operating system | Package links |
-|-----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| RHEL 7.x and CentOS 7 | [docker-engine]({{ rpm-prefix }}/centos/7/Packages/docker-engine-{{ rpm-version}}.el7.centos.x86_64.rpm), [docker-engine-debuginfo]({{ rpm-prefix }}/centos/7/Packages/docker-engine-debuginfo-{{ rpm-version }}.el7.centos.x86_64.rpm), [docker-engine-selinux]({{ rpm-prefix }}/centos/7/Packages/docker-engine-selinux-{{ rpm-version}}.el7.centos.noarch.rpm) |
-| RHEL 7.2 (only use if you have problems with `selinux` with the packages above) | [docker-engine]({{ rpm-prefix }}/rhel/7.2/Packages/docker-engine-{{ rpm-rhel-version }}.el7.centos.x86_64.rpm), [docker-engine-debuginfo]({{ rpm-prefix }}/rhel/7.2/Packages/docker-engine-debuginfo-{{ rpm-rhel-version }}.el7.centos.x86_64.rpm), [docker-engine-selinux]({{ rpm-prefix }}/rhel/7.2/Packages/docker-engine-selinux-{{ rpm-rhel-version }}.el7.centos.noarch.rpm) |
-| SLES 12 | [docker-engine]({{ rpm-prefix }}/opensuse/12.3/Packages/docker-engine-{{ rpm-version }}.x86_64.rpm) |
-| Ubuntu Xenial | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-xenial_amd64.deb) |
-| Ubuntu Trusty | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-trusty_amd64.deb) |
-| Ubuntu Precise | [docker-engine]({{ deb-prefix }}/docker-engine_{{ deb-version }}~ubuntu-precise_amd64.deb) |
diff --git a/cs-engine/1.13/release-notes.md b/cs-engine/1.13/release-notes.md
deleted file mode 100644
index fe30493f5f..0000000000
--- a/cs-engine/1.13/release-notes.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -title: CS Docker Engine 1.13 release notes -description: Commercially supported Docker Engine release notes -keywords: docker, engine, install, release notes -redirect_from: -- /docker-trusted-registry/cs-engine/release-notes/ -- /cs-engine/release-notes/ ---- - -This document describes the latest changes, additions, known issues, and fixes -for the commercially supported Docker Engine (CS Engine). - -The CS Engine is functionally equivalent to the corresponding Docker Engine that -it references. However, a commercially supported release also includes -back-ported fixes (security-related and priority defects) from the open source. -It incorporates defect fixes that you can use in environments where new features -cannot be adopted as quickly for consistency and compatibility reasons. - -[Looking for the release notes for Docker CS Engine 1.12?](/cs-engine/1.12/release-notes/index.md) - -## CS Engine 1.13.1-cs9 (2017-12-13) - -* Handle cleanup DNS for attachable container to prevent leak in name resolution -[docker/libnetwork#1999](https://github.com/docker/libnetwork/pull/1999) -* When a node is removed, delete all of its attachment tasks so networks use -by those tasks can be removed [docker/swarmkit#2417](https://github.com/docker/swarmkit/pull/2417) -* Increase gRPC request timeout to 20 seconds for sending snapshots to prevent -`context deadline exceeded` errors [docker/swarmkit#2406](https://github.com/docker/swarmkit/pull/2406) -* Avoid using a map for log attributes to prevent panic -[moby/moby#34174](https://github.com/moby/moby/pull/34174) -* Fix "raw" mode with the Splunk logging driver -[moby/moby#34520](https://github.com/moby/moby/pull/34520) -* Don't unmount entire plugin manager tree on remove -[moby/moby#33422](https://github.com/moby/moby/pull/33422) -* Redact secret data on secret creation [moby/moby#33884](https://github.com/moby/moby/pull/33884) -* Sort secrets and 
configs to ensure idempotence and prevent -`docker stack deploy` from useless restart of services [docker/cli#509](https://github.com/docker/cli/pull/509) -* Automatically set `may_detach_mounts=1` on startup to prevent -`device or resource busy` errors [moby/moby#34886](https://github.com/moby/moby/pull/34886) -* Don't abort when setting `may_detach_mounts` -[moby/moby#35172](https://github.com/moby/moby/pull/35172) - -## CS Engine 1.13.1-cs8 (2017-11-17) - -* Protect health monitor channel to prevent engine panic [moby/moby#35482](https://github.com/moby/moby/pull/35482) - -## CS Engine 1.13.1-cs7 (2017-10-13) - -* Fix logic in network resource reaping to prevent memory leak [docker/libnetwork#1944](https://github.com/docker/libnetwork/pull/1944) [docker/libnetwork#1960](https://github.com/docker/libnetwork/pull/1960) -* Increase max GRPC message size to 128MB for larger snapshots so newly added managers can successfully join [docker/swarmkit#2375](https://github.com/docker/swarmkit/pull/2375) - -## CS Engine 1.13.1-cs6 (2017-08-24) - -* Fix daemon panic on docker image push [moby/moby#33105](https://github.com/moby/moby/pull/33105) -* Fix panic in concurrent network creation/deletion operations [docker/libnetwork#1861](https://github.com/docker/libnetwork/pull/1861) -* Improve network db stability under stressful situations [docker/libnetwork#1860](https://github.com/docker/libnetwork/pull/1860) -* Enable TCP Keep-Alive in Docker client [docker/cli#415](https://github.com/docker/cli/pull/415) -* Lock goroutine to OS thread while changing NS [docker/libnetwork#1911](https://github.com/docker/libnetwork/pull/1911) -* Ignore PullOptions for running tasks [docker/swarmkit#2351](https://github.com/docker/swarmkit/pull/2351) - -## CS Engine 1.13.1-cs5 (21 Jul 2017) - -* Add more locking to storage drivers [#31136](https://github.com/moby/moby/pull/31136) -* Prevent data race on `docker network connect/disconnect` [#33456](https://github.com/moby/moby/pull/33456) -* 
Improve service discovery reliability [#1796](https://github.com/docker/libnetwork/pull/1796) [#1808](https://github.com/docker/libnetwork/pull/1808)
-* Fix resource leak in swarm mode [#2215](https://github.com/docker/swarmkit/pull/2215)
-* Optimize `docker system df` for volumes on NFS [#33620](https://github.com/moby/moby/pull/33620)
-* Fix validation bug with host-mode ports in swarm mode [#2177](https://github.com/docker/swarmkit/pull/2177)
-* Fix potential crash in swarm mode [#2268](https://github.com/docker/swarmkit/pull/2268)
-* Improve network control-plane reliability [#1704](https://github.com/docker/libnetwork/pull/1704)
-* Do not error out when selinux relabeling is not supported on volume filesystem [#33831](https://github.com/moby/moby/pull/33831)
-* Remove debugging code for aufs ebusy errors [#31665](https://github.com/moby/moby/pull/31665)
-* Prevent resource leak on healthchecks [#33781](https://github.com/moby/moby/pull/33781)
-* Fix issue where containerd supervisor may exit prematurely [#32590](https://github.com/moby/moby/pull/32590)
-* Fix potential containerd crash [#2](https://github.com/docker/containerd/pull/2)
-* Ensure server details are set in client even when an error is returned [#33827](https://github.com/moby/moby/pull/33827)
-* Fix issue where slow/dead `docker logs` clients can block the container [#33897](https://github.com/moby/moby/pull/33897)
-* Fix potential panic on Windows when running as a service [#32244](https://github.com/moby/moby/pull/32244)
-
-## CS Engine 1.13.1-cs4 (01 Jun 2017)
-
-Backports all fixes from [17.03.2](https://github.com/moby/moby/releases/tag/v17.03.2-ce)
-
-**Note**: This release includes a fix for potential data loss under certain
-circumstances with the local (built-in) volume driver.
- -## CS Engine 1.13.1-cs3 -(30 Mar 2017) - -Backports all fixes from 17.03.1 - -* Fix issue with swarm CA timeouts [#2063](https://github.com/docker/swarmkit/pull/2063) [#2064](https://github.com/docker/swarmkit/pull/2064/files) - -## CS Engine 1.13.1-cs2 (23 Feb 2017) - -### Client - -* Fix panic in `docker stats --format` [#30776](https://github.com/docker/docker/pull/30776) - -### Contrib - -* Update various `bash` and `zsh` completion scripts [#30823](https://github.com/docker/docker/pull/30823), [#30945](https://github.com/docker/docker/pull/30945) and more... -* Block obsolete socket families in default seccomp profile - mitigates unpatched kernels' CVE-2017-6074 [#29076](https://github.com/docker/docker/pull/29076) - -### Networking - -* Fix bug on overlay encryption keys rotation in cross-datacenter swarm [#30727](https://github.com/docker/docker/pull/30727) -* Fix side effect panic in overlay encryption and network control plane communication failure ("No installed keys could decrypt the message") on frequent swarm leader re-election [#25608](https://github.com/docker/docker/pull/25608) -* Several fixes around system responsiveness and datapath programming when using overlay network with external kv-store [docker/libnetwork#1639](https://github.com/docker/libnetwork/pull/1639), [docker/libnetwork#1632](https://github.com/docker/libnetwork/pull/1632) and more... 
-* Discard incoming plain vxlan packets for encrypted overlay network [#31170](https://github.com/docker/docker/pull/31170)
-* Release the network attachment on allocation failure [#31073](https://github.com/docker/docker/pull/31073)
-* Fix port allocation when multiple published ports map to the same target port [docker/swarmkit#1835](https://github.com/docker/swarmkit/pull/1835)
-
-### Runtime
-
-* Fix a deadlock in docker logs [#30223](https://github.com/docker/docker/pull/30223)
-* Fix CPU spin waiting for log write events [#31070](https://github.com/docker/docker/pull/31070)
-* Fix a possible crash when using journald [#31231](https://github.com/docker/docker/pull/31231) [#31263](https://github.com/docker/docker/pull/31263)
-* Fix a panic on close of nil channel [#31274](https://github.com/docker/docker/pull/31274)
-* Fix duplicate mount point for `--volumes-from` in `docker run` [#29563](https://github.com/docker/docker/pull/29563)
-* Fix `--cache-from` does not cache last step [#31189](https://github.com/docker/docker/pull/31189)
-* Fix issue with lock contention while performing container size calculation [#31159](https://github.com/docker/docker/pull/31159)
-
-### Swarm Mode
-
-* Shutdown leaks an error when the container was never started [#31279](https://github.com/docker/docker/pull/31279)
-* Fix possibility of tasks getting stuck in the "NEW" state during a leader failover [docker/swarmkit#1938](https://github.com/docker/swarmkit/pull/1938)
-* Fix extraneous task creations for global services that led to confusing replica counts in `docker service ls` [docker/swarmkit#1957](https://github.com/docker/swarmkit/pull/1957)
-* Fix problem that made rolling updates slow when `task-history-limit` was set to 1 [docker/swarmkit#1948](https://github.com/docker/swarmkit/pull/1948)
-* Restart tasks elsewhere, if appropriate, when they are shut down as a result of nodes no longer satisfying constraints
[docker/swarmkit#1958](https://github.com/docker/swarmkit/pull/1958) - -## CS Engine 1.13.1-cs1 (08 Feb 2017) - -Refer to the detailed lists of changes since the release of CS Engine 1.12.6-cs8 -by reviewing the changes in [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0) -and [v1.13.1](https://github.com/docker/docker/releases/tag/v1.13.1). diff --git a/cs-engine/1.13/upgrade.md b/cs-engine/1.13/upgrade.md deleted file mode 100644 index 12e2e7ae69..0000000000 --- a/cs-engine/1.13/upgrade.md +++ /dev/null @@ -1,268 +0,0 @@ ---- -title: Upgrade Commercially Supported Docker Engine -description: Learn how to upgrade the commercially supported version of Docker Engine. -keywords: docker, engine, upgrade ---- - -This article explains how to upgrade your CS Docker Engine. - -The upgrade process depends on the version that is currently installed and the -version that you want to upgrade to: - -* [Upgrade from the same minor version](upgrade.md#upgrade-from-the-same-minor-version) -* [Upgrade from the same major version](upgrade.md#upgrade-from-the-same-major-version) -* [Upgrade from a legacy version](upgrade.md#upgrade-from-a-legacy-version) - -Before starting the upgrade, make sure you stop all containers running on the -host. This ensures your containers have time for cleaning up before exiting, -thus avoiding data loss or corruption. - -## Upgrade from the same minor version - -Use these instructions if you're upgrading your CS Docker Engine within the -same minor version. As an example, from 1.1.0 to 1.1.1. - -### CentOS 7.1 & RHEL 7.0/7.1/7.2 -Use these instructions to upgrade YUM-based systems. - -1. Update your docker-engine package: - - ```bash - $ sudo yum upgrade docker-engine - ``` - -2. Check that the CS Docker Engine is running: - - ```bash - $ sudo docker info - ``` - -### Ubuntu 14.04 LTS or 16.04 LTS -Use these instructions to upgrade APT-based systems. - -1. 
Update your docker-engine package: - - ```bash - $ sudo apt-get update && sudo apt-get upgrade docker-engine - ``` - -2. Check that the CS Docker Engine is running: - - ```bash - $ sudo docker info - ``` - -### SUSE Enterprise 12.3 - -1. Update your docker-engine package: - - ```bash - $ sudo zypper upgrade docker-engine - ``` - -2. Check that the CS Docker Engine is running: - - ```bash - $ sudo docker info - ``` - - -## Upgrade from the same major version - -Use these instructions if you're upgrading your CS Docker Engine within the -same major version. As an example, from 1.1.x to 1.2.x. - - -### CentOS 7.1 & RHEL 7.0/7.1/7.2 -Use these instructions to upgrade YUM-based systems. - -1. Add the Docker Engine repository. - - ```bash - $ sudo yum-config-manager --add-repo https://packages.docker.com/1.13/yum/repo/main/centos/7 - ``` - - This adds the repository of the latest version of CS Docker Engine. You can - customize the URL to install other versions. - -2. Install the new package: - - ```bash - $ sudo yum update docker-engine - ``` - -3. Check that the CS Engine is running: - - ```bash - $ sudo docker info - ``` - -### Ubuntu 14.04 LTS or 16.04 LTS -Use these instructions to update APT-based systems. - - -1. Add the docker engine repository. - - ```bash - $ echo "deb https://packages.docker.com/1.13/apt/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list - ``` - - This adds the repository of the latest version of CS Docker Engine for the - Ubuntu Trusty distribution. Change the "ubuntu-trusty" string to the - distribution you're using: - - * debian-jessie (Debian 8) - * debian-stretch (future release) - * debian-wheezy (Debian 7) - * ubuntu-precise (Ubuntu 12.04) - * ubuntu-trusty (Ubuntu 14.04) - * ubuntu-xenial (Ubuntu 16.04) - -2. Update your docker-engine package. - - ```bash - $ sudo apt-get update && sudo apt-get upgrade docker-engine - ``` - -3. 
Check that the CS Engine is running: - - ```bash - $ sudo docker info - ``` - -#### SUSE Enterprise 12.3 - -1. Add the docker engine repository. - - ```bash - $ sudo zypper ar -t YUM https://packages.docker.com/1.13/yum/repo/main/opensuse/12.3 docker-1.13 - ``` - - This adds the repository of the latest version of CS Docker Engine. You - can customize the URL to install other versions. - -2. Install the new package: - - ```bash - $ sudo zypper update docker-engine - ``` - -3. Check that the CS Engine is running: - - ```bash - $ sudo docker info - ``` - - -## Upgrade from a legacy version - -Use these instructions if you're upgrading your CS Docker Engine from a version -prior to 1.9. In this case, first uninstall CS Docker Engine, and -then install the latest version. - -### CentOS 7.1 & RHEL 7.0/7.1 -Use these instructions to upgrade YUM-based systems. - -1. Remove the current CS Engine: - - ```bash - $ sudo yum remove docker-engine-cs - ``` - -2. Add the Docker public key for CS packages: - - ```bash - $ sudo rpm --import "https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e" - ``` - - Note: if the key server above does not respond, you can try one of these: - - pgp.mit.edu - - keyserver.ubuntu.com - -3. Install yum-utils if necessary: - - ```bash - $ sudo yum install -y yum-utils - ``` - -4. Add the repository for the new version and disable the old one. - - ```bash - $ sudo yum-config-manager --add-repo https://packages.docker.com/1.13/yum/repo/main/centos/7 - $ sudo yum-config-manager --disable 'Docker_cs*' - ``` - - This adds the repository of the latest version of CS Docker Engine. You - can customize the URL to install other versions. - -5. Install the new package: - - ```bash - $ sudo yum install docker-engine - ``` - -6. Enable the Docker daemon as a service and start it. 
- - ```bash - $ sudo systemctl enable docker.service - $ sudo systemctl start docker.service - ``` - -### Ubuntu 14.04 LTS -Use these instructions to update APT-based systems. - - -1. Remove the current Engine: - - ```bash - $ sudo apt-get remove docker-engine-cs - ``` - -2. Add the Docker public key for CS packages: - - ```bash - $ curl -s 'https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e' | sudo apt-key add --import - ``` - - Note: if the key server above does not respond, you can try one of these: - - pgp.mit.edu - - keyserver.ubuntu.com - -3. Install the HTTPS helper for apt (your system may already have it): - - ```bash - $ sudo apt-get update && sudo apt-get install apt-transport-https - ``` - -4. Install additional virtual drivers not in the parent image. - - ```bash - $ sudo apt-get install -y linux-image-extra-virtual - ``` - - You may need to reboot your server after updating the LTS kernel. - -5. Add the repository for the new version: - - ```bash - $ echo "deb https://packages.docker.com/1.13/apt/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list - ``` - - This adds the repository of the latest version of CS Docker Engine for the - Ubuntu Trusty distribution. Change the "ubuntu-trusty" string to the - distribution you're using: - - * debian-jessie (Debian 8) - * debian-stretch (future release) - * debian-wheezy (Debian 7) - * ubuntu-precise (Ubuntu 12.04) - * ubuntu-trusty (Ubuntu 14.04) - * ubuntu-xenial (Ubuntu 16.04) - - - -6. 
Install the upgraded package: - - ```bash - $ sudo apt-get upgrade docker-engine - ``` diff --git a/datacenter/dtr/2.5/guides/admin/monitor-and-troubleshoot/troubleshoot-dtr.md b/datacenter/dtr/2.5/guides/admin/monitor-and-troubleshoot/troubleshoot-dtr.md index 58d203e04f..a8318eaa20 100644 --- a/datacenter/dtr/2.5/guides/admin/monitor-and-troubleshoot/troubleshoot-dtr.md +++ b/datacenter/dtr/2.5/guides/admin/monitor-and-troubleshoot/troubleshoot-dtr.md @@ -67,9 +67,11 @@ On a healthy cluster the output will be `[]`. Starting in DTR 2.5.5, you can run RethinkCLI from a separate image. First, set an environment variable for your DTR replica ID: +{% raw %} ```bash REPLICA_ID=$(docker inspect -f '{{.Name}}' $(docker ps -q -f name=dtr-rethink) | cut -f 3 -d '-') ``` +{% endraw %} RethinkDB stores data in different databases that contain multiple tables. Run the following command to get into interactive mode and query the contents of the DB: diff --git a/datacenter/ucp/3.0/guides/admin/configure/use-trusted-images-for-ci.md b/datacenter/ucp/3.0/guides/admin/configure/use-trusted-images-for-ci.md deleted file mode 100644 index 0a563d339d..0000000000 --- a/datacenter/ucp/3.0/guides/admin/configure/use-trusted-images-for-ci.md +++ /dev/null @@ -1,149 +0,0 @@ ---- -description: Set up and configure content trust and signing policy for use with a continuous integration system -keywords: cup, trust, notary, security, continuous integration -title: Use trusted images for continuous integration ---- - -The document provides a minimal example on setting up Docker Content Trust (DCT) in -Universal Control Plane (UCP) for use with a Continuous Integration (CI) system. It -covers setting up the necessary accounts and trust delegations to restrict only those -images built by your CI system to be deployed to your UCP managed cluster. - -## Set up UCP accounts and teams - -The first step is to create a user account for your CI system. 
For the purposes of
-this document we will assume you are using Jenkins as your CI system and will therefore
-name the account "jenkins". As an admin user logged in to UCP, navigate to "User Management"
-and select "Add User". Create a user with the name "jenkins" and set a strong password.
-
-Next, create a team called "CI" and add the "jenkins" user to this team. All signing
-policy is team based, so if we want to grant only a single user the ability to sign images
-destined to be deployed on the cluster, we must create a team for this one user.
-
-## Set up the signing policy
-
-While still logged in as an admin, navigate to "Admin Settings" and select the "Content Trust"
-subsection. Select the checkbox to enable content trust and in the select box that appears,
-select the "CI" team we have just created. Save the settings.
-
-This policy will require that every image that is referenced in a `docker image pull`,
-`docker container run`, or `docker service create` must be signed by a key corresponding
-to a member of the "CI" team. In this case, the only member is the "jenkins" user.
-
-## Create keys for the Jenkins user
-
-The signing policy implementation uses the certificates issued in user client bundles
-to connect a signature to a user. Using an incognito browser window (or otherwise),
-log in to the "jenkins" user account you created earlier. Download a client bundle for
-this user. It is also recommended to change the description associated with the public
-key stored in UCP such that you can identify in the future which key is being used for
-signing.
-
-Each time a user retrieves a new client bundle, a new keypair is generated. It is therefore
-necessary to keep track of a specific bundle that a user chooses to designate as their signing bundle.
-
-Once you have decompressed the client bundle, the only two files you need for the purposes
-of signing are `cert.pem` and `key.pem`.
These represent the public and private parts of -the user's signing identity respectively. We will load the `key.pem` file onto the Jenkins -servers, and use `cert.pem` to create delegations for the "jenkins" user in our -Trusted Collection. - -## Prepare the Jenkins server - -### Load `key.pem` on Jenkins - -You will need to use the notary client to load keys onto your Jenkins server. Simply run -`notary -d /path/to/.docker/trust key import /path/to/key.pem`. You will be asked to set -a password to encrypt the key on disk. For automated signing, this password can be configured -into the environment under the variable name `DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE`. The `-d` -flag to the command specifies the path to the `trust` subdirectory within the server's `docker` -configuration directory. Typically this is found at `~/.docker/trust`. - -### Enable content trust - -There are two ways to enable content trust: globally, and per operation. To enabled content -trust globally, set the environment variable `DOCKER_CONTENT_TRUST=1`. To enable on a per -operation basis, wherever you run `docker image push` in your Jenkins scripts, add the flag -`--disable-content-trust=false`. You may wish to use this second option if you only want -to sign some images. - -The Jenkins server is now prepared to sign images, but we need to create delegations referencing -the key to give it the necessary permissions. - -## Initialize a repository - -Any commands displayed in this section should _not_ be run from the Jenkins server. You -will most likely want to run them from your local system. - -If this is a new repository, create it in Docker Trusted Registry (DTR) or Docker Hub, -depending on which you use to store your images, before proceeding further. - -We will now initialize the trust data and create the delegation that provides the Jenkins -key with permissions to sign content. 
The following commands initialize the trust data and -rotate snapshotting responsibilities to the server. This is necessary to ensure human involvement -is not required to publish new content. - -``` -notary -s https://my_notary_server.com -d ~/.docker/trust init my_repository -notary -s https://my_notary_server.com -d ~/.docker/trust key rotate my_repository snapshot -r -notary -s https://my_notary_server.com -d ~/.docker/trust publish my_repository -``` - -The `-s` flag specifies the server hosting a notary service. If you are operating against -Docker Hub, this will be `https://notary.docker.io`. If you are operating against your own DTR -instance, this will be the same hostname you use in image names when running docker commands preceded -by the `https://` scheme. For example, if you would run `docker image push my_dtr:4443/me/an_image` the value -of the `-s` flag would be expected to be `https://my_dtr:4443`. - -If you use DTR, the name of the repository should be identical to the full name you use -in a `docker image push` command. If you use Docker Hub, the name you use in a `docker image push` -must be preceded by `docker.io/`. For instance, if you ran `docker image push me/alpine`, you then -use `notary init docker.io/me/alpine`. - -For brevity, we will exclude the `-s` and `-d` flags from subsequent command, but be aware you -will still need to provide them for the commands to work correctly. - -Now that the repository is initialized, we need to create the delegations for Jenkins. Docker -Content Trust treats a delegation role called `targets/releases` specially. It considers this -delegation to contain the canonical list of published images for the repository. 
For this reason, -you should add all users to this delegation with the following command: - -``` -notary delegation add my_repository targets/releases --all-paths /path/to/cert.pem -``` - -This solves a number of prioritization problems that would result from the need to determine -which delegation should ultimately be trusted for a specific image. However, since any user -can sign the `targets/releases` role it is not trusted -in determining if a signing policy has been met. Therefore, you also need to create a -delegation specifically for Jenkins: - -``` -notary delegation add my_repository targets/jenkins --all-paths /path/to/cert.pem -``` - -We will then publish both these updates (remember to add the correct `-s` and `-d` flags): - -``` -notary publish my_repository -``` - -Informational (Advanced): If we included the `targets/releases` role in determining if a signing policy -had been met, we would run into the situation of images being opportunistically deployed when -an appropriate user signs. In the scenario we have described so far, only images signed by -the "CI" team (containing only the "jenkins" user) should be deployable. If a user "Moby" could -also sign images but was not part of the "CI" team, they might sign and publish a new `targets/releases` -that contained their image. UCP would refuse to deploy this image because it was not signed -by the "CI" team. However, the next time Jenkins published an image, it would update and sign -the `targets/releases` role as whole, enabling "Moby" to deploy their image. - -## Conclusion - -With the Trusted Collection initialized, and delegations created, the Jenkins server will -now use the key we imported to sign any images we push to this repository. - -Through either the Docker CLI, or the UCP browser interface, we will find that any images -that do not meet our signing policy cannot be used. 
The signing policy we set up requires -that the "CI" team must have signed any image we attempt to `docker image pull`, `docker container run`, -or `docker service create`, and the only member of that team is the "jenkins" user. This -restricts us to only running images that were published by our Jenkins CI system. diff --git a/datacenter/ucp/3.0/guides/admin/install/install-on-azure.md b/datacenter/ucp/3.0/guides/admin/install/install-on-azure.md index 1d256bf6c8..d08c44349b 100644 --- a/datacenter/ucp/3.0/guides/admin/install/install-on-azure.md +++ b/datacenter/ucp/3.0/guides/admin/install/install-on-azure.md @@ -272,10 +272,14 @@ for each VM in the VM scale set. ## Install UCP -Use the following command to install UCP on the manager node. -The `--pod-cidr` option maps to the IP address range that you configured for -the subnets in the previous sections, and the `--host-address` maps to the -IP address of the master node. +Run the following command to install UCP on a manager node. The `--pod-cidr` +option maps to the IP address range that you have configured for the Azure +subnet, and the `--host-address` maps to the private IP address of the master node. + +> Note: The `pod-cidr` range must match the Azure Virtual Network's Subnet +> attached to the hosts. For example, if the Azure Virtual Network had the range +> `172.0.0.0/16` with Virtual Machines provisioned on an Azure Subnet of +> `172.0.1.0/24`, then the Pod CIDR should also be `172.0.1.0/24`. 
```bash docker container run --rm -it \ @@ -283,8 +287,7 @@ docker container run --rm -it \ -v /var/run/docker.sock:/var/run/docker.sock \ {{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} install \ --host-address \ - --interactive \ - --swarm-port 3376 \ --pod-cidr \ - --cloud-provider Azure + --cloud-provider Azure \ + --interactive ``` \ No newline at end of file diff --git a/datacenter/ucp/3.0/guides/admin/install/upgrade.md b/datacenter/ucp/3.0/guides/admin/install/upgrade.md index 47a6b2271d..a01974bb97 100644 --- a/datacenter/ucp/3.0/guides/admin/install/upgrade.md +++ b/datacenter/ucp/3.0/guides/admin/install/upgrade.md @@ -1,11 +1,11 @@ --- -title: Upgrade to UCP 2.2 +title: Upgrade to UCP 3.0 description: Learn how to upgrade Docker Universal Control Plane with minimal impact to your users. keywords: UCP, upgrade, update --- This page guides you in upgrading Docker Universal Control Plane (UCP) to -version 2.2. +version 3.0. Before upgrading to a new version of UCP, check the [release notes](../../release-notes/index.md) for this version for information @@ -37,8 +37,8 @@ This allows you to recover if something goes wrong during the upgrade process. > Upgrading and backup archives > > The backup archive is version-specific, so you can't use it during the -> upgrade process. For example, if you create a backup archive for a UCP 2.1 -> swarm, you can't use the archive file after you upgrade to UCP 2.2. +> upgrade process. For example, if you create a backup archive for a UCP 2.2 +> swarm, you can't use the archive file after you upgrade to UCP 3.0. ## Upgrade Docker Engine @@ -112,13 +112,13 @@ all the nodes managed by UCP are healthy. ## Recommended upgrade paths -If you're running a UCP version that's lower than 2.1, first upgrade to the -latest 2.1 version, then upgrade to 2.2. 
Use these rules for your upgrade -path to UCP 2.2: +If you're running a UCP version that's lower than 2.2, first upgrade to the +latest 2.2 version, then upgrade to 3.0. Use these rules for your upgrade +path to UCP 3.0: -- From UCP 1.1: UCP 1.1 -> UCP 2.1 -> UCP 2.2 -- From UCP 2.0: UCP 2.0 -> UCP 2.1 -> UCP 2.2 -- From UCP 2.1: UCP 2.1 -> UCP 2.2 +- From UCP 1.1: UCP 1.1 -> UCP 2.2 -> UCP 3.0 +- From UCP 2.0: UCP 2.0 -> UCP 2.2 -> UCP 3.0 +- From UCP 2.2: UCP 2.2 -> UCP 3.0 ## Where to go next diff --git a/datacenter/ucp/3.0/guides/architecture.md b/datacenter/ucp/3.0/guides/architecture.md index f74bbb9464..4afa6d38f9 100644 --- a/datacenter/ucp/3.0/guides/architecture.md +++ b/datacenter/ucp/3.0/guides/architecture.md @@ -5,13 +5,13 @@ keywords: ucp, architecture --- Universal Control Plane is a containerized application that runs on -[Docker Enterprise Edition](/ee/index.md) and extends its functionality -to make it easier to deploy, configure, and monitor your applications at scale. +[Docker Enterprise Edition](/ee/index.md), extending its functionality +to simplify the deployment, configuration, and monitoring of your applications at scale. UCP also secures Docker with role-based access control so that only authorized users can make changes and deploy applications to your Docker cluster. -![](images/architecture-1.svg) +![](images/ucp-architecture-1.svg){: .with-border} Once Universal Control Plane (UCP) instance is deployed, developers and IT operations no longer interact with Docker Engine directly, but interact with @@ -25,7 +25,7 @@ the Docker CLI client and Docker Compose. Docker UCP leverages the clustering and orchestration functionality provided by Docker. -![](images/architecture-2.svg) +![](images/ucp-architecture-2.svg){: .with-border} A swarm is a collection of nodes that are in the same Docker cluster. 
[Nodes](/engine/swarm/key-concepts.md) in a Docker swarm operate in one of two @@ -66,38 +66,89 @@ on a node depend on whether the node is a manager or a worker. > on Windows, the `ucp-agent` component is named `ucp-agent-win`. > [Learn about architecture-specific images](admin/install/architecture-specific-images.md). +Internally, UCP uses the following components: + +* Calico 3.0.1 +* Kubernetes 1.8.11 + ### UCP components in manager nodes Manager nodes run all UCP services, including the web UI and data stores that persist the state of UCP. These are the UCP services running on manager nodes: -| UCP component | Description | -|:--------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ucp-agent | Monitors the node and ensures the right UCP services are running | -| ucp-reconcile | When ucp-agent detects that the node is not running the right UCP components, it starts the ucp-reconcile container to converge the node to its desired state. It is expected for the ucp-reconcile container to remain in an exited state when the node is healthy. | -| ucp-auth-api | The centralized service for identity and authentication used by UCP and DTR | -| ucp-auth-store | Stores authentication configurations and data for users, organizations, and teams | -| ucp-auth-worker | Performs scheduled LDAP synchronizations and cleans authentication and authorization data | -| ucp-client-root-ca | A certificate authority to sign client bundles | -| ucp-cluster-root-ca | A certificate authority used for TLS communication between UCP components | -| ucp-controller | The UCP web server | -| ucp-dsinfo | Docker system information collection script to assist with troubleshooting | -| ucp-kv | Used to store the UCP configurations. 
Don't use it in your applications, since it's for internal use only | -| ucp-metrics | Used to collect and process metrics for a node, like the disk space available | -| ucp-proxy | A TLS proxy. It allows secure access to the local Docker Engine to UCP components | -| ucp-swarm-manager | Used to provide backwards-compatibility with Docker Swarm | +| UCP component | Description | +|:--------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| k8s_calico-kube-controllers | A cluster-scoped Kubernetes controller used to coordinate Calico networking. Runs on one manager node only. | +| k8s_calico-node | The Calico node agent, which coordinates networking fabric according to the cluster-wide Calico configuration. Part of the `calico-node` daemonset. Runs on all nodes. Configure the CNI plugin by using the `--cni-installer-url` flag. If this flag isn't set, UCP uses Calico as the default CNI plugin. | +| k8s_install-cni_calico-node | A container that's responsible for installing the Calico CNI plugin binaries and configuration on each host. Part of the `calico-node` daemonset. Runs on all nodes. | +| k8s_POD_calico-node | Pause container for the `calico-node` pod. | +| k8s_POD_calico-kube-controllers | Pause container for the `calico-kube-controllers` pod. | +| k8s_POD_compose | Pause container for the `compose` pod. | +| k8s_POD_kube-dns | Pause container for the `kube-dns` pod. | +| k8s_ucp-dnsmasq-nanny | A dnsmasq instance used in the Kubernetes DNS Service. Part of the `kube-dns` deployment. Runs on one manager node only. 
| +| k8s_ucp-kube-compose | A custom Kubernetes resource component that's responsible for translating Compose files into Kubernetes constructs. Part of the `compose` deployment. Runs on one manager node only. | +| k8s_ucp-kube-dns | The main Kubernetes DNS Service, used by pods to [resolve service names](https://v1-8.docs.kubernetes.io/docs/concepts/services-networking/dns-pod-service/). Part of the `kube-dns` deployment. Runs on one manager node only. Provides service discovery for Kubernetes services and pods. A set of three containers deployed via Kubernetes as a single pod. | +| k8s_ucp-kubedns-sidecar | Health checking and metrics daemon of the Kubernetes DNS Service. Part of the `kube-dns` deployment. Runs on one manager node only. | +| ucp-agent | Monitors the node and ensures the right UCP services are running. | +| ucp-auth-api | The centralized service for identity and authentication used by UCP and DTR. | +| ucp-auth-store | Stores authentication configurations and data for users, organizations, and teams. | +| ucp-auth-worker | Performs scheduled LDAP synchronizations and cleans authentication and authorization data. | +| ucp-client-root-ca | A certificate authority to sign client bundles. | +| ucp-cluster-root-ca | A certificate authority used for TLS communication between UCP components. | +| ucp-controller | The UCP web server. | +| ucp-dsinfo | Docker system information collection script to assist with troubleshooting. | +| ucp-interlock | Monitors swarm workloads configured to use Layer 7 routing. Only runs when you enable Layer 7 routing. | +| ucp-interlock-proxy | A service that provides load balancing and proxying for swarm workloads. Only runs when you enable Layer 7 routing. | +| ucp-kube-apiserver | A master component that serves the Kubernetes API. It persists its state in `etcd` directly, and all other components communicate with API server directly. 
| +| ucp-kube-controller-manager | A master component that manages the desired state of controllers and other Kubernetes objects. It monitors the API server and performs background tasks when needed. | +| ucp-kubelet | The Kubernetes node agent running on every node, which is responsible for running Kubernetes pods, reporting the health of the node, and monitoring resource usage. | +| ucp-kube-proxy | The networking proxy running on every node, which enables pods to contact Kubernetes services and other pods, via cluster IP addresses. | +| ucp-kube-scheduler | A master component that handles scheduling of pods. It communicates with the API server only to obtain workloads that need to be scheduled. | +| ucp-kv | Used to store the UCP configurations. Don't use it in your applications, since it's for internal use only. Also used by Kubernetes components. | +| ucp-metrics | Used to collect and process metrics for a node, like the disk space available. | +| ucp-proxy | A TLS proxy. It allows secure access to the local Docker Engine to UCP components. | +| ucp-reconcile | When ucp-agent detects that the node is not running the right UCP components, it starts the ucp-reconcile container to converge the node to its desired state. It is expected for the ucp-reconcile container to remain in an exited state when the node is healthy. | +| ucp-swarm-manager | Used to provide backwards-compatibility with Docker Swarm. | + ### UCP components in worker nodes Worker nodes are the ones where you run your applications. 
These are the UCP services running on worker nodes: -| UCP component | Description | -|:--------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ucp-agent | Monitors the node and ensures the right UCP services are running | -| ucp-dsinfo | Docker system information collection script to assist with troubleshooting | -| ucp-reconcile | When ucp-agent detects that the node is not running the right UCP components, it starts the ucp-reconcile container to converge the node to its desired state. It is expected for the ucp-reconcile container to remain in an exited state when the node is healthy. | -| ucp-proxy | A TLS proxy. It allows secure access to the local Docker Engine to UCP components | +| UCP component | Description | +|:----------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| k8s_calico-node | The Calico node agent, which coordinates networking fabric according to the cluster-wide Calico configuration. Part of the `calico-node` daemonset. Runs on all nodes. | +| k8s_install-cni_calico-node | A container that's responsible for installing the Calico CNI plugin binaries and configuration on each host. Part of the `calico-node` daemonset. Runs on all nodes. | +| k8s_POD_calico-node | "Pause" container for the Calico-node pod. By default, this container is hidden, but you can see it by running `docker ps -a`. 
| +| ucp-agent | Monitors the node and ensures the right UCP services are running | +| ucp-interlock-extension | Helper service that reconfigures the ucp-interlock-proxy service based on the swarm workloads that are running. | +| ucp-interlock-proxy | A service that provides load balancing and proxying for swarm workloads. Only runs when you enable Layer 7 routing. | +| ucp-dsinfo | Docker system information collection script to assist with troubleshooting | +| ucp-kubelet | The kubernetes node agent running on every node, which is responsible for running Kubernetes pods, reporting the health of the node, and monitoring resource usage | +| ucp-kube-proxy | The networking proxy running on every node, which enables pods to contact Kubernetes services and other pods, via cluster IP addresses | +| ucp-reconcile | When ucp-agent detects that the node is not running the right UCP components, it starts the ucp-reconcile container to converge the node to its desired state. It is expected for the ucp-reconcile container to remain in an exited state when the node is healthy. | +| ucp-proxy | A TLS proxy. It allows secure access to the local Docker Engine to UCP components | + +## Pause containers + +Every pod in Kubernetes has a _pause_ container, which is an "empty" container +that bootstraps the pod to establish all of the namespaces. Pause containers +hold the cgroups, reservations, and namespaces of a pod before its individual +containers are created. The pause container's image is always present, so the +allocation of the pod's resources is instantaneous. + +By default, pause containers are hidden, but you can see them by running +`docker ps -a`. 
+ +``` +docker ps -a | grep -I pause + +8c9707885bf6 dockereng/ucp-pause:3.0.0-6d332d3 "/pause" 47 hours ago Up 47 hours k8s_POD_calico-kube-controllers-559f6948dc-5c84l_kube-system_d00e5130-1bf4-11e8-b426-0242ac110011_0 +258da23abbf5 dockereng/ucp-pause:3.0.0-6d332d3 "/pause" 47 hours ago Up 47 hours k8s_POD_kube-dns-6d46d84946-tqpzr_kube-system_d63acec6-1bf4-11e8-b426-0242ac110011_0 +2e27b5d31a06 dockereng/ucp-pause:3.0.0-6d332d3 "/pause" 47 hours ago Up 47 hours k8s_POD_compose-698cf787f9-dxs29_kube-system_d5866b3c-1bf4-11e8-b426-0242ac110011_0 +5d96dff73458 dockereng/ucp-pause:3.0.0-6d332d3 "/pause" 47 hours ago Up 47 hours k8s_POD_calico-node-4fjgv_kube-system_d043a0ea-1bf4-11e8-b426-0242ac110011_0 +``` ## Volumes used by UCP @@ -129,6 +180,16 @@ driver. By default, the data for these volumes can be found at `/var/lib/docker/volumes//_data`. +## Configurations use by UCP + +| Configuration name | Description | +|:-------------------------------|:-------------------------------------------------------------------------------------------------| +| com.docker.interlock.extension | Configuration for the Interlock extension service that monitors and configures the proxy service | +| com.docker.interlock.proxy | Configuration for the service responsible for handling user requests and routing them | +| com.docker.license | The Docker EE license | +| com.docker.ucp.config | The UCP controller configuration. Most of the settings available on the UCP UI are stored here | +| com.docker.ucp.interlock.conf | Configuration for the core Interlock service | + ## How you interact with UCP There are two ways to interact with UCP: the web UI or the CLI. @@ -136,17 +197,16 @@ There are two ways to interact with UCP: the web UI or the CLI. You can use the UCP web UI to manage your swarm, grant and revoke user permissions, deploy, configure, manage, and monitor your applications. 
-![](images/architecture-3.svg) +![](images/ucp-architecture-3.svg){: .with-border} UCP also exposes the standard Docker API, so you can continue using existing tools like the Docker CLI client. Since UCP secures your cluster with role-based access control, you need to configure your Docker CLI client and other client tools to authenticate your requests using -[client certificates](user/access-ucp/index.md) that you can download +[client certificates](user-access/index.md) that you can download from your UCP profile page. - ## Where to go next -* [System requirements](admin/install/system-requirements.md) -* [Plan your installation](admin/install/system-requirements.md) +- [System requirements](admin/install/system-requirements.md) +- [Plan your installation](admin/install/plan-installation.md) diff --git a/datacenter/ucp/3.0/guides/images/change-orchestrator-for-node-1.png b/datacenter/ucp/3.0/guides/images/change-orchestrator-for-node-1.png new file mode 100644 index 0000000000..d625a5cd8e Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/change-orchestrator-for-node-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/cli-based-access-2.png b/datacenter/ucp/3.0/guides/images/cli-based-access-2.png new file mode 100644 index 0000000000..c4067603d9 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/cli-based-access-2.png differ diff --git a/datacenter/ucp/3.0/guides/images/cli-based-access-3.png b/datacenter/ucp/3.0/guides/images/cli-based-access-3.png new file mode 100644 index 0000000000..5d274e7207 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/cli-based-access-3.png differ diff --git a/datacenter/ucp/3.0/guides/images/client-bundle.png b/datacenter/ucp/3.0/guides/images/client-bundle.png new file mode 100644 index 0000000000..e4a419ada3 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/client-bundle.png differ diff --git a/datacenter/ucp/3.0/guides/images/create-service-account-1.png 
b/datacenter/ucp/3.0/guides/images/create-service-account-1.png new file mode 100644 index 0000000000..e850b04384 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/create-service-account-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/create-service-account-2.png b/datacenter/ucp/3.0/guides/images/create-service-account-2.png new file mode 100644 index 0000000000..278ed3da9b Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/create-service-account-2.png differ diff --git a/datacenter/ucp/3.0/guides/images/create-service-account-3.png b/datacenter/ucp/3.0/guides/images/create-service-account-3.png new file mode 100644 index 0000000000..f1bba1a46a Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/create-service-account-3.png differ diff --git a/datacenter/ucp/3.0/guides/images/custom-role-30.png b/datacenter/ucp/3.0/guides/images/custom-role-30.png new file mode 100644 index 0000000000..6143991782 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/custom-role-30.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-a-service-5.png b/datacenter/ucp/3.0/guides/images/deploy-a-service-5.png new file mode 100644 index 0000000000..8e465aa42f Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-a-service-5.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-compose-kubernetes-1.png b/datacenter/ucp/3.0/guides/images/deploy-compose-kubernetes-1.png new file mode 100644 index 0000000000..e2877a88be Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-compose-kubernetes-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-compose-kubernetes-2.png b/datacenter/ucp/3.0/guides/images/deploy-compose-kubernetes-2.png new file mode 100644 index 0000000000..18454e3b28 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-compose-kubernetes-2.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-compose-kubernetes-3.png 
b/datacenter/ucp/3.0/guides/images/deploy-compose-kubernetes-3.png new file mode 100644 index 0000000000..dfc731d7ed Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-compose-kubernetes-3.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-ingress-controller-1.png b/datacenter/ucp/3.0/guides/images/deploy-ingress-controller-1.png new file mode 100644 index 0000000000..f9b13475bf Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-ingress-controller-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-ingress-controller-2.png b/datacenter/ucp/3.0/guides/images/deploy-ingress-controller-2.png new file mode 100644 index 0000000000..ae4c2d5273 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-ingress-controller-2.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-ingress-controller-3.png b/datacenter/ucp/3.0/guides/images/deploy-ingress-controller-3.png new file mode 100644 index 0000000000..6af93ab000 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-ingress-controller-3.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-kubernetes-workload-1.png b/datacenter/ucp/3.0/guides/images/deploy-kubernetes-workload-1.png new file mode 100644 index 0000000000..31eb5a1cdd Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-kubernetes-workload-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-kubernetes-workload-2.png b/datacenter/ucp/3.0/guides/images/deploy-kubernetes-workload-2.png new file mode 100644 index 0000000000..287ca51080 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-kubernetes-workload-2.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-kubernetes-workload-3.png b/datacenter/ucp/3.0/guides/images/deploy-kubernetes-workload-3.png new file mode 100644 index 0000000000..4717b49611 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-kubernetes-workload-3.png 
differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-kubernetes-workload-4.png b/datacenter/ucp/3.0/guides/images/deploy-kubernetes-workload-4.png new file mode 100644 index 0000000000..c729de596e Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-kubernetes-workload-4.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-kubernetes-workload-5.png b/datacenter/ucp/3.0/guides/images/deploy-kubernetes-workload-5.png new file mode 100644 index 0000000000..ce7b501568 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-kubernetes-workload-5.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-multi-service-app-1.png b/datacenter/ucp/3.0/guides/images/deploy-multi-service-app-1.png new file mode 100644 index 0000000000..c3e79b02d3 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-multi-service-app-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-multi-service-app-2.png b/datacenter/ucp/3.0/guides/images/deploy-multi-service-app-2.png new file mode 100644 index 0000000000..ef6298e086 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-multi-service-app-2.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-multi-service-app-3.png b/datacenter/ucp/3.0/guides/images/deploy-multi-service-app-3.png new file mode 100644 index 0000000000..6cd2861668 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-multi-service-app-3.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-multi-service-app-4.png b/datacenter/ucp/3.0/guides/images/deploy-multi-service-app-4.png new file mode 100644 index 0000000000..bd5ff0b29e Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-multi-service-app-4.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-multi-service-app-5.png b/datacenter/ucp/3.0/guides/images/deploy-multi-service-app-5.png new file mode 100644 index 0000000000..e2b5b332ee Binary files /dev/null and 
b/datacenter/ucp/3.0/guides/images/deploy-multi-service-app-5.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-stack-to-collection-1.png b/datacenter/ucp/3.0/guides/images/deploy-stack-to-collection-1.png new file mode 100644 index 0000000000..06ee08c838 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-stack-to-collection-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/deploy-stack-to-collection-2.png b/datacenter/ucp/3.0/guides/images/deploy-stack-to-collection-2.png new file mode 100644 index 0000000000..6741c4fd46 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/deploy-stack-to-collection-2.png differ diff --git a/datacenter/ucp/3.0/guides/images/interlock-architecture-1.svg b/datacenter/ucp/3.0/guides/images/interlock-architecture-1.svg new file mode 100644 index 0000000000..83e759938a --- /dev/null +++ b/datacenter/ucp/3.0/guides/images/interlock-architecture-1.svg @@ -0,0 +1,204 @@ + + + + interlock-architecture-1 + Created with Sketch. 
+ + + + + + + + + + + + + Docker swarm managed with UCP + + + + + + + + UCP + + + + + + interlock-proxy:80 + + + + + + + worker node + + + + + + + + + + UCP + + + + + + interlock-proxy:80 + + + + + + + worker node + + + + + + + + + + UCP + + + + + + interlock-extension + + + + + + wordpress:8000 + + + + + + + worker node + + + + + + + + + + + + UCP + + + + + + ucp-interlock + + + + + + + manager node + + + + + + + + + + + + your load balancer + + + + + + + + + + + + + + + + + + + + + http://wordpress.example.org + + + + + + + wordpress-net + + + + + + + + + + + + + + + + + + + + + + + + + + + ucp-interlock + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/datacenter/ucp/3.0/guides/images/interlock-default-service-1.png b/datacenter/ucp/3.0/guides/images/interlock-default-service-1.png new file mode 100644 index 0000000000..5c63a95e94 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/interlock-default-service-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/interlock-default-service-2.png b/datacenter/ucp/3.0/guides/images/interlock-default-service-2.png new file mode 100644 index 0000000000..b12883d062 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/interlock-default-service-2.png differ diff --git a/datacenter/ucp/3.0/guides/images/interlock-deploy-production-1.svg b/datacenter/ucp/3.0/guides/images/interlock-deploy-production-1.svg new file mode 100644 index 0000000000..48ccb3f7ca --- /dev/null +++ b/datacenter/ucp/3.0/guides/images/interlock-deploy-production-1.svg @@ -0,0 +1,207 @@ + + + + interlock-deploy-production-1 + Created with Sketch. 
+ + + + + + + + Docker swarm managed with UCP + + + + + + node-6 + + + + + UCP + + + + + + interlock-proxy:80 + + + + + + + worker node + + + + + + + + node-5 + + + + + UCP + + + + + + interlock-proxy:80 + + + + + interlock-proxy:80 + + + + + + + worker node + + + + + + + + node-4 + + + + + UCP + + + + + + interlock-extension + + + + + + wordpress:8000 + + + + + + + worker node + + + + + + + + + + node-3 + + + + + UCP + + + + + + + manager node + + + + + + + + node-2 + + + + + UCP + + + + + + + manager node + + + + + + + + node-1 + + + + + UCP + + + + + + ucp-interlock + + + + + + + manager node + + + + + + + + + + + + your load balancer + + + + + + + + + + + + + + + + + + + + + http://wordpress.example.org + + + + + + \ No newline at end of file diff --git a/datacenter/ucp/3.0/guides/images/interlock-install-1.svg b/datacenter/ucp/3.0/guides/images/interlock-install-1.svg new file mode 100644 index 0000000000..649439a15d --- /dev/null +++ b/datacenter/ucp/3.0/guides/images/interlock-install-1.svg @@ -0,0 +1,198 @@ + + + + use-domain-names-1 + Created with Sketch. 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 192.168.99.104 + + + + + + worker node + + + + + + + UCP + + + + + + wordpress:8000 + + + + + + + 192.168.99.103 + + + + + + worker node + + + + + + + UCP + + + + + + + + + 192.168.99.102 + + + + + + manager node + + + + + + + UCP + + + + + + + 192.168.99.101 + + + + + + manager node + + + + + + + UCP + + + + + + + 192.168.99.100 + + + + + + manager node + + + + + + + UCP + + + + + + + + + + + swarm routing mesh + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 192.168.99.100:8000 + + + + + + \ No newline at end of file diff --git a/datacenter/ucp/3.0/guides/images/interlock-install-2.svg b/datacenter/ucp/3.0/guides/images/interlock-install-2.svg new file mode 100644 index 0000000000..070eeb9340 --- /dev/null +++ b/datacenter/ucp/3.0/guides/images/interlock-install-2.svg @@ -0,0 +1,198 @@ + + + + use-domain-names-2 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 192.168.99.104 + + + + + + worker node + + + + + + + UCP + + + + + + wordpress:8000 + + + + + + + 192.168.99.103 + + + + + + worker node + + + + + + + UCP + + + + + + + + + 192.168.99.102 + + + + + + manager node + + + + + + + UCP + + + + + + + 192.168.99.101 + + + + + + manager node + + + + + + + UCP + + + + + + + 192.168.99.100 + + + + + + manager node + + + + + + + UCP + + + + + + + + + + + HTTP routing mesh + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + wordpress.example.org:80 + + + + + + \ No newline at end of file diff --git a/datacenter/ucp/3.0/guides/images/interlock-install-3.png b/datacenter/ucp/3.0/guides/images/interlock-install-3.png new file mode 100644 index 0000000000..9ecc24f6fc Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/interlock-install-3.png differ diff --git a/datacenter/ucp/3.0/guides/images/interlock-overview-1.svg b/datacenter/ucp/3.0/guides/images/interlock-overview-1.svg new file mode 
100644 index 0000000000..20bbc751d1 --- /dev/null +++ b/datacenter/ucp/3.0/guides/images/interlock-overview-1.svg @@ -0,0 +1,180 @@ + + + + interlock-overview-1 + Created with Sketch. + + + + + + + + + + Docker swarm managed with UCP + + + + + + node-5 + + + + + + worker node + + + + + + + UCP + + + + + + wordpress:8000 + + + + + + + node-4 + + + + + + worker node + + + + + + + UCP + + + + + + + + + node-3 + + + + + + manager node + + + + + + + UCP + + + + + + + node-2 + + + + + + manager node + + + + + + + UCP + + + + + + + node-1 + + + + + + manager node + + + + + + + UCP + + + + + + + + + + + swarm routing mesh + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + http://node-5:8000 + + + + + \ No newline at end of file diff --git a/datacenter/ucp/3.0/guides/images/interlock-overview-2.svg b/datacenter/ucp/3.0/guides/images/interlock-overview-2.svg new file mode 100644 index 0000000000..8f9b9ad0d7 --- /dev/null +++ b/datacenter/ucp/3.0/guides/images/interlock-overview-2.svg @@ -0,0 +1,186 @@ + + + + interlock-overview-2 + Created with Sketch. 
+ + + + + + + + + + Docker swarm managed with UCP + + + + + + node-5 + + + + + + worker node + + + + + + + UCP + + + + + + wordpress:8000 + + + + + + + node-4 + + + + + + worker node + + + + + + + UCP + + + + + + + + + node-3 + + + + + + manager node + + + + + + + UCP + + + + + + + node-2 + + + + + + manager node + + + + + + + UCP + + + + + + + node-1 + + + + + + manager node + + + + + + + UCP + + + + + + + + + + + swarm routing mesh + + + + + + layer 7 routing + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + http://wordpress.example.org + + + + + \ No newline at end of file diff --git a/datacenter/ucp/3.0/guides/images/interlock-tls-1.png b/datacenter/ucp/3.0/guides/images/interlock-tls-1.png new file mode 100644 index 0000000000..d49625d287 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/interlock-tls-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/interlock-tls-2.png b/datacenter/ucp/3.0/guides/images/interlock-tls-2.png new file mode 100644 index 0000000000..d906147e02 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/interlock-tls-2.png differ diff --git a/datacenter/ucp/3.0/guides/images/interlock-tls-3.png b/datacenter/ucp/3.0/guides/images/interlock-tls-3.png new file mode 100644 index 0000000000..151055ada7 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/interlock-tls-3.png differ diff --git a/datacenter/ucp/3.0/guides/images/isolate-nodes-10.png b/datacenter/ucp/3.0/guides/images/isolate-nodes-10.png new file mode 100644 index 0000000000..a997704510 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/isolate-nodes-10.png differ diff --git a/datacenter/ucp/3.0/guides/images/isolate-nodes-5.png b/datacenter/ucp/3.0/guides/images/isolate-nodes-5.png new file mode 100644 index 0000000000..59f74cf267 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/isolate-nodes-5.png differ diff --git a/datacenter/ucp/3.0/guides/images/isolate-nodes-6.png 
b/datacenter/ucp/3.0/guides/images/isolate-nodes-6.png new file mode 100644 index 0000000000..2674a02259 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/isolate-nodes-6.png differ diff --git a/datacenter/ucp/3.0/guides/images/isolate-nodes-7.png b/datacenter/ucp/3.0/guides/images/isolate-nodes-7.png new file mode 100644 index 0000000000..f6a4bedbe9 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/isolate-nodes-7.png differ diff --git a/datacenter/ucp/3.0/guides/images/isolate-nodes-8.png b/datacenter/ucp/3.0/guides/images/isolate-nodes-8.png new file mode 100644 index 0000000000..66c62569da Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/isolate-nodes-8.png differ diff --git a/datacenter/ucp/3.0/guides/images/isolate-nodes-9.png b/datacenter/ucp/3.0/guides/images/isolate-nodes-9.png new file mode 100644 index 0000000000..c2bfd3ed83 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/isolate-nodes-9.png differ diff --git a/datacenter/ucp/3.0/guides/images/isolate-volumes-0.png b/datacenter/ucp/3.0/guides/images/isolate-volumes-0.png new file mode 100644 index 0000000000..70a8c16ff5 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/isolate-volumes-0.png differ diff --git a/datacenter/ucp/3.0/guides/images/isolate-volumes-0a.png b/datacenter/ucp/3.0/guides/images/isolate-volumes-0a.png new file mode 100644 index 0000000000..7116bb0ddb Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/isolate-volumes-0a.png differ diff --git a/datacenter/ucp/3.0/guides/images/join-nodes-to-cluster-1.png b/datacenter/ucp/3.0/guides/images/join-nodes-to-cluster-1.png new file mode 100644 index 0000000000..c522d4d64d Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/join-nodes-to-cluster-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/join-nodes-to-cluster-2.png b/datacenter/ucp/3.0/guides/images/join-nodes-to-cluster-2.png new file mode 100644 index 0000000000..7e07794d2e Binary files 
/dev/null and b/datacenter/ucp/3.0/guides/images/join-nodes-to-cluster-2.png differ diff --git a/datacenter/ucp/3.0/guides/images/join-nodes-to-cluster-3.png b/datacenter/ucp/3.0/guides/images/join-nodes-to-cluster-3.png new file mode 100644 index 0000000000..b2a475e2b5 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/join-nodes-to-cluster-3.png differ diff --git a/datacenter/ucp/3.0/guides/images/join-windows-nodes-to-cluster-1.png b/datacenter/ucp/3.0/guides/images/join-windows-nodes-to-cluster-1.png new file mode 100644 index 0000000000..3519ffb121 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/join-windows-nodes-to-cluster-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/kube-create-role.png b/datacenter/ucp/3.0/guides/images/kube-create-role.png new file mode 100644 index 0000000000..a7c56e7e32 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/kube-create-role.png differ diff --git a/datacenter/ucp/3.0/guides/images/kube-grant-rolebinding.png b/datacenter/ucp/3.0/guides/images/kube-grant-rolebinding.png new file mode 100644 index 0000000000..e8c739273d Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/kube-grant-rolebinding.png differ diff --git a/datacenter/ucp/3.0/guides/images/kube-grant-roleselect.png b/datacenter/ucp/3.0/guides/images/kube-grant-roleselect.png new file mode 100644 index 0000000000..e72d915aad Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/kube-grant-roleselect.png differ diff --git a/datacenter/ucp/3.0/guides/images/kube-grant-wizard.png b/datacenter/ucp/3.0/guides/images/kube-grant-wizard.png new file mode 100644 index 0000000000..974b9f312e Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/kube-grant-wizard.png differ diff --git a/datacenter/ucp/3.0/guides/images/kube-rbac-grants.png b/datacenter/ucp/3.0/guides/images/kube-rbac-grants.png new file mode 100644 index 0000000000..9cb1bcfdc4 Binary files /dev/null and 
b/datacenter/ucp/3.0/guides/images/kube-rbac-grants.png differ diff --git a/datacenter/ucp/3.0/guides/images/kube-rbac-roles.png b/datacenter/ucp/3.0/guides/images/kube-rbac-roles.png new file mode 100644 index 0000000000..a6cb551bf0 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/kube-rbac-roles.png differ diff --git a/datacenter/ucp/3.0/guides/images/kube-role-create.png b/datacenter/ucp/3.0/guides/images/kube-role-create.png new file mode 100644 index 0000000000..0a189e293f Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/kube-role-create.png differ diff --git a/datacenter/ucp/3.0/guides/images/kubernetes-version.png b/datacenter/ucp/3.0/guides/images/kubernetes-version.png new file mode 100644 index 0000000000..60a248e849 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/kubernetes-version.png differ diff --git a/datacenter/ucp/3.0/guides/images/manage-and-deploy-private-images-1.png b/datacenter/ucp/3.0/guides/images/manage-and-deploy-private-images-1.png new file mode 100644 index 0000000000..66465741e5 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/manage-and-deploy-private-images-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/manage-and-deploy-private-images-2.png b/datacenter/ucp/3.0/guides/images/manage-and-deploy-private-images-2.png new file mode 100644 index 0000000000..6954506496 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/manage-and-deploy-private-images-2.png differ diff --git a/datacenter/ucp/3.0/guides/images/manage-and-deploy-private-images-3.png b/datacenter/ucp/3.0/guides/images/manage-and-deploy-private-images-3.png new file mode 100644 index 0000000000..b39138c587 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/manage-and-deploy-private-images-3.png differ diff --git a/datacenter/ucp/3.0/guides/images/manage-and-deploy-private-images-4.png b/datacenter/ucp/3.0/guides/images/manage-and-deploy-private-images-4.png new file mode 100644 index 
0000000000..26b91d3f4d Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/manage-and-deploy-private-images-4.png differ diff --git a/datacenter/ucp/3.0/guides/images/manage-secrets-4a.png b/datacenter/ucp/3.0/guides/images/manage-secrets-4a.png new file mode 100644 index 0000000000..adb5d85db2 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/manage-secrets-4a.png differ diff --git a/datacenter/ucp/3.0/guides/images/migrate-kubernetes-roles-1.png b/datacenter/ucp/3.0/guides/images/migrate-kubernetes-roles-1.png new file mode 100644 index 0000000000..3bb600c12f Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/migrate-kubernetes-roles-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/migrate-kubernetes-roles-2.png b/datacenter/ucp/3.0/guides/images/migrate-kubernetes-roles-2.png new file mode 100644 index 0000000000..d609ab7f76 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/migrate-kubernetes-roles-2.png differ diff --git a/datacenter/ucp/3.0/guides/images/overview-1.png b/datacenter/ucp/3.0/guides/images/overview-1.png new file mode 100644 index 0000000000..7bb908139f Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/overview-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/overview-2.png b/datacenter/ucp/3.0/guides/images/overview-2.png new file mode 100644 index 0000000000..22261dd985 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/overview-2.png differ diff --git a/datacenter/ucp/3.0/guides/images/rbac-pull-images-1.png b/datacenter/ucp/3.0/guides/images/rbac-pull-images-1.png new file mode 100644 index 0000000000..9802b4cc1b Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/rbac-pull-images-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/rbac-pull-images-2.png b/datacenter/ucp/3.0/guides/images/rbac-pull-images-2.png new file mode 100644 index 0000000000..cea41ea5c3 Binary files /dev/null and 
b/datacenter/ucp/3.0/guides/images/rbac-pull-images-2.png differ diff --git a/datacenter/ucp/3.0/guides/images/rbac-roles.png b/datacenter/ucp/3.0/guides/images/rbac-roles.png new file mode 100644 index 0000000000..9a4902f2ba Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/rbac-roles.png differ diff --git a/datacenter/ucp/3.0/guides/images/route-simple-app-1.png b/datacenter/ucp/3.0/guides/images/route-simple-app-1.png new file mode 100644 index 0000000000..38a4402e41 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/route-simple-app-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/saml_enabled.png b/datacenter/ucp/3.0/guides/images/saml_enabled.png new file mode 100644 index 0000000000..022c9e37fb Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/saml_enabled.png differ diff --git a/datacenter/ucp/3.0/guides/images/saml_settings.png b/datacenter/ucp/3.0/guides/images/saml_settings.png new file mode 100644 index 0000000000..89d1d437de Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/saml_settings.png differ diff --git a/datacenter/ucp/3.0/guides/images/ucp-architecture-1.svg b/datacenter/ucp/3.0/guides/images/ucp-architecture-1.svg new file mode 100644 index 0000000000..abd4a32d15 --- /dev/null +++ b/datacenter/ucp/3.0/guides/images/ucp-architecture-1.svg @@ -0,0 +1,71 @@ + + + + architecture-1 + Created with Sketch. 
+ + + + + + + + + + cloud servers + + + + + + virtual servers + + + + + + physical servers + + + + + + + Docker EE Engine + + + + + + Universal Control Plane + + + + + + Docker Trusted Registry + + + + + + your applications + + + + + + + deploy and manage + + + + + + + + + + + + \ No newline at end of file diff --git a/datacenter/ucp/3.0/guides/images/ucp-architecture-2.svg b/datacenter/ucp/3.0/guides/images/ucp-architecture-2.svg new file mode 100644 index 0000000000..46e7833789 --- /dev/null +++ b/datacenter/ucp/3.0/guides/images/ucp-architecture-2.svg @@ -0,0 +1,166 @@ + + + + architecture-2 + Created with Sketch. + + + + + Docker swarm + + + + + + + + + worker node + + + + + + + Docker EE + + + + + + UCP agent + + + + + + UCP worker + + + + + + + + + + worker node + + + + + + + Docker EE + + + + + + UCP agent + + + + + + UCP worker + + + + + + + + + + + + manager node + + + + + + + Docker EE + + + + + + UCP agent + + + + + + UCP manager + + + + + + + + + + manager node + + + + + + + Docker EE + + + + + + UCP agent + + + + + + UCP manager + + + + + + + + + + manager node + + + + + + + Docker EE + + + + + + UCP agent + + + + + + UCP manager + + + + + + + + + + \ No newline at end of file diff --git a/datacenter/ucp/3.0/guides/images/ucp-architecture-3.svg b/datacenter/ucp/3.0/guides/images/ucp-architecture-3.svg new file mode 100644 index 0000000000..6a9c66a0a3 --- /dev/null +++ b/datacenter/ucp/3.0/guides/images/ucp-architecture-3.svg @@ -0,0 +1,233 @@ + + + + architecture-3 + Created with Sketch. 
+ + + + + + + + + + + + + + + + + + + + + + + Docker swarm + + + + + + + + your load balancer + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + worker node + + + + + + + Docker EE + + + + + + UCP agent + + + + + + UCP worker + + + + + + + + + + worker node + + + + + + + Docker EE + + + + + + UCP agent + + + + + + UCP worker + + + + + + + + + + + + manager node + + + + + + + Docker EE + + + + + + UCP agent + + + + + + UCP manager + + + + + + + + + + manager node + + + + + + + Docker EE + + + + + + UCP agent + + + + + + UCP manager + + + + + + + + + + manager node + + + + + + + Docker EE + + + + + + UCP agent + + + + + + UCP manager + + + + + + + + + + + + UI + + + + + + CLI + + + + + + + \ No newline at end of file diff --git a/datacenter/ucp/3.0/guides/images/ucp_usermgmt_users_create01.png b/datacenter/ucp/3.0/guides/images/ucp_usermgmt_users_create01.png new file mode 100644 index 0000000000..685c9d8c92 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/ucp_usermgmt_users_create01.png differ diff --git a/datacenter/ucp/3.0/guides/images/ucp_usermgmt_users_create02.png b/datacenter/ucp/3.0/guides/images/ucp_usermgmt_users_create02.png new file mode 100644 index 0000000000..936dae2e59 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/ucp_usermgmt_users_create02.png differ diff --git a/datacenter/ucp/3.0/guides/images/use-constraints-in-stack-deployment-2.png b/datacenter/ucp/3.0/guides/images/use-constraints-in-stack-deployment-2.png index 67b0e5d299..3d58cd0675 100644 Binary files a/datacenter/ucp/3.0/guides/images/use-constraints-in-stack-deployment-2.png and b/datacenter/ucp/3.0/guides/images/use-constraints-in-stack-deployment-2.png differ diff --git a/datacenter/ucp/3.0/guides/images/use-constraints-in-stack-deployment.png b/datacenter/ucp/3.0/guides/images/use-constraints-in-stack-deployment.png index 0c041d16c2..358d15996b 100644 Binary files a/datacenter/ucp/3.0/guides/images/use-constraints-in-stack-deployment.png 
and b/datacenter/ucp/3.0/guides/images/use-constraints-in-stack-deployment.png differ diff --git a/datacenter/ucp/3.0/guides/images/use-externally-signed-certs-2.png b/datacenter/ucp/3.0/guides/images/use-externally-signed-certs-2.png index 071cd1e10b..b08d65659b 100644 Binary files a/datacenter/ucp/3.0/guides/images/use-externally-signed-certs-2.png and b/datacenter/ucp/3.0/guides/images/use-externally-signed-certs-2.png differ diff --git a/ee/ucp/images/use-nfs-volume-1.png b/datacenter/ucp/3.0/guides/images/use-nfs-volume-1.png similarity index 100% rename from ee/ucp/images/use-nfs-volume-1.png rename to datacenter/ucp/3.0/guides/images/use-nfs-volume-1.png diff --git a/ee/ucp/images/use-nfs-volume-2.png b/datacenter/ucp/3.0/guides/images/use-nfs-volume-2.png similarity index 100% rename from ee/ucp/images/use-nfs-volume-2.png rename to datacenter/ucp/3.0/guides/images/use-nfs-volume-2.png diff --git a/ee/ucp/images/use-nfs-volume-3.png b/datacenter/ucp/3.0/guides/images/use-nfs-volume-3.png similarity index 100% rename from ee/ucp/images/use-nfs-volume-3.png rename to datacenter/ucp/3.0/guides/images/use-nfs-volume-3.png diff --git a/ee/ucp/images/use-nfs-volume-4.png b/datacenter/ucp/3.0/guides/images/use-nfs-volume-4.png similarity index 100% rename from ee/ucp/images/use-nfs-volume-4.png rename to datacenter/ucp/3.0/guides/images/use-nfs-volume-4.png diff --git a/ee/ucp/images/use-nfs-volume-5.png b/datacenter/ucp/3.0/guides/images/use-nfs-volume-5.png similarity index 100% rename from ee/ucp/images/use-nfs-volume-5.png rename to datacenter/ucp/3.0/guides/images/use-nfs-volume-5.png diff --git a/datacenter/ucp/3.0/guides/images/view-namespace-resources-1.png b/datacenter/ucp/3.0/guides/images/view-namespace-resources-1.png new file mode 100644 index 0000000000..9fb281cda3 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/view-namespace-resources-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/view-namespace-resources-2.png 
b/datacenter/ucp/3.0/guides/images/view-namespace-resources-2.png new file mode 100644 index 0000000000..81f249d46e Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/view-namespace-resources-2.png differ diff --git a/datacenter/ucp/3.0/guides/images/view-namespace-resources-3.png b/datacenter/ucp/3.0/guides/images/view-namespace-resources-3.png new file mode 100644 index 0000000000..afca7bc7ea Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/view-namespace-resources-3.png differ diff --git a/datacenter/ucp/3.0/guides/images/view-namespace-resources-4.png b/datacenter/ucp/3.0/guides/images/view-namespace-resources-4.png new file mode 100644 index 0000000000..1a3e41f131 Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/view-namespace-resources-4.png differ diff --git a/datacenter/ucp/3.0/guides/images/view-namespace-resources-5.png b/datacenter/ucp/3.0/guides/images/view-namespace-resources-5.png new file mode 100644 index 0000000000..19f5336bae Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/view-namespace-resources-5.png differ diff --git a/datacenter/ucp/3.0/guides/images/web-based-access-1.png b/datacenter/ucp/3.0/guides/images/web-based-access-1.png new file mode 100644 index 0000000000..fb7304147d Binary files /dev/null and b/datacenter/ucp/3.0/guides/images/web-based-access-1.png differ diff --git a/datacenter/ucp/3.0/guides/images/web-based-access-2.png b/datacenter/ucp/3.0/guides/images/web-based-access-2.png index 65313e945a..00437d1c22 100644 Binary files a/datacenter/ucp/3.0/guides/images/web-based-access-2.png and b/datacenter/ucp/3.0/guides/images/web-based-access-2.png differ diff --git a/datacenter/ucp/3.0/guides/index.md b/datacenter/ucp/3.0/guides/index.md index a054b6794a..626dbe7c3c 100644 --- a/datacenter/ucp/3.0/guides/index.md +++ b/datacenter/ucp/3.0/guides/index.md @@ -1,41 +1,68 @@ --- title: Universal Control Plane overview -description: Learn about Docker Universal Control Plane, the 
enterprise-grade cluster - management solution from Docker. -keywords: ucp, overview, orchestration, clustering -redirect_from: -- /ucp/ +description: | + Learn about Docker Universal Control Plane, the enterprise-grade cluster management solution from Docker. +keywords: ucp, overview, orchestration, cluster --- Docker Universal Control Plane (UCP) is the enterprise-grade cluster management solution from Docker. You install it on-premises or in your virtual private -cloud, and it helps you manage your Docker swarm and applications through a +cloud, and it helps you manage your Docker cluster and applications through a single interface. -![](../../../images/ucp.png){: .with-border} +![](images/overview-1.png){: .with-border} -## Centralized swarm management +## Centralized cluster management With Docker, you can join up to thousands of physical or virtual machines -together to create a container cluster, or swarm, allowing you to deploy your +together to create a container cluster that allows you to deploy your applications at scale. Docker Universal Control Plane extends the -functionality provided by Docker to make it easier to manage your swarm +functionality provided by Docker to make it easier to manage your cluster from a centralized place. You can manage and monitor your container cluster using a graphical UI. -![](../../../images/try-ddc-2.png){: .with-border} +![](images/overview-2.png){: .with-border} -Since UCP exposes the standard Docker API, you can continue using the tools +## Deploy, manage, and monitor + +With Docker UCP, you can manage from a centralized place all of the computing +resources you have available, like nodes, volumes, and networks. + +You can also deploy and monitor your applications and services. + +## Built-in security and access control + +Docker UCP has its own built-in authentication mechanism and integrates with +LDAP services. 
It also has role-based access control (RBAC), so that you can +control who can access and make changes to your cluster and applications. +[Learn about role-based access control](authorization/index.md). + +![](images/overview-3.png){: .with-border} + +Docker UCP integrates with Docker Trusted Registry so that you can keep the +Docker images you use for your applications behind your firewall, where they +are safe and can't be tampered with. + +You can also enforce security policies and only allow running applications +that use Docker images you know and trust. + +## Use the Docker CLI client + +Because UCP exposes the standard Docker API, you can continue using the tools you already know, including the Docker CLI client, to deploy and manage your applications. -As an example, you can use the `docker info` command to check the -status of a Docker swarm managed by UCP: +For example, you can use the `docker info` command to check the status of a +cluster that's managed by UCP: -```none -$ docker info +```bash +docker info +``` +This command produces the output that you expect from the Docker EE Engine: + +```bash Containers: 38 Running: 23 Paused: 0 @@ -51,30 +78,7 @@ Managers: 1 … ``` -## Deploy, manage, and monitor - -With Docker UCP, you can manage from a centralized place all of the computing -resources you have available, like nodes, volumes, and networks. - -You can also deploy and monitor your applications and services. - -## Built-in security and access control - -Docker UCP has its own built-in authentication mechanism and integrates with -LDAP services. It also has role-based access control (RBAC), so that you can -control who can access and make changes to your swarm and applications. -[Learn about role-based access control](access-control/index.md). 
- -![](images/overview-3.png){: .with-border} - -Docker UCP integrates with Docker Trusted Registry so that you can keep the -Docker images you use for your applications behind your firewall, where they -are safe and can't be tampered with. - -You can also enforce security policies and only allow running applications -that use Docker images you know and trust. - ## Where to go next -* [UCP architecture](architecture.md) -* [Install UCP](admin/install/index.md) +- [Install UCP](admin/install/index.md) +- [Docker EE Platform 2.0 architecture](/ee/docker-ee-architecture.md) diff --git a/datacenter/ucp/3.0/guides/user/access-ucp/kubectl.md b/datacenter/ucp/3.0/guides/user/access-ucp/kubectl.md new file mode 100644 index 0000000000..e64e464f2b --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/access-ucp/kubectl.md @@ -0,0 +1,104 @@ +--- +title: Install the Kubernetes CLI +description: Learn how to install kubectl, the Kubernetes command-line tool, on Docker Universal Control Plane. +keywords: ucp, cli, administration, kubectl, Kubernetes +--- + +Docker EE 2.0 and higher deploys Kubernetes as part of a UCP installation. +Deploy, manage, and monitor Kubernetes workloads from the UCP dashboard. Users can +also interact with the Kubernetes deployment through the Kubernetes +command-line tool named kubectl. + +To access the UCP cluster with kubectl, install the [UCP client bundle](cli.md). + +> Kubernetes on Docker for Mac and Docker for Windows +> +> Docker for Mac and Docker for Windows provide a standalone Kubernetes server that +> runs on your development machine, with kubectl installed by default. This installation is +> separate from the Kubernetes deployment on a UCP cluster. +> Learn how to [deploy to Kubernetes on Docker for Mac](/docker-for-mac/kubernetes.md). +{: .important} + +## Install the kubectl binary + +To use kubectl, install the binary on a workstation which has access to your UCP endpoint. 
+ +> Must install compatible version +> +> Kubernetes only guarantees compatibility with kubectl versions that are +/-1 minor versions away from the Kubernetes version. +{: .important} + +First, find which version of Kubernetes is running in your cluster. This can be found +within the Universal Control Plane dashboard or at the UCP API endpoint [version](/reference/ucp/3.0/api/). + +From the UCP dashboard, click on **About Docker EE** within the **Admin** menu in the top left corner + of the dashboard. Then navigate to **Kubernetes**. + + ![Find Kubernetes version](../../images/kubernetes-version.png){: .with-border} + +Once you have the Kubernetes version, install the kubectl client for the relevant +operating system. + + +
    +
    +``` +# Set the Kubernetes version as found in the UCP Dashboard or API +k8sversion=v1.8.11 + +# Get the kubectl binary. +curl -LO https://storage.googleapis.com/kubernetes-release/release/$k8sversion/bin/darwin/amd64/kubectl + +# Make the kubectl binary executable. +chmod +x ./kubectl + +# Move the kubectl executable to /usr/local/bin. +sudo mv ./kubectl /usr/local/bin/kubectl +``` +
    +
    +
    +``` +# Set the Kubernetes version as found in the UCP Dashboard or API +k8sversion=v1.8.11 + +# Get the kubectl binary. +curl -LO https://storage.googleapis.com/kubernetes-release/release/$k8sversion/bin/linux/amd64/kubectl + +# Make the kubectl binary executable. +chmod +x ./kubectl + +# Move the kubectl executable to /usr/local/bin. +sudo mv ./kubectl /usr/local/bin/kubectl +``` +
    +
    +
    +You can download the binary from this [link](https://storage.googleapis.com/kubernetes-release/release/v.1.8.11/bin/windows/amd64/kubectl.exe) + +If you have curl installed on your system, you use these commands in Powershell. + +```cmd +$env:k8sversion = "v1.8.11" + +curl https://storage.googleapis.com/kubernetes-release/release/$env:k8sversion/bin/windows/amd64/kubectl.exe +``` +
    +
    +
    + +## Using kubectl with a Docker EE cluster + +Docker Enterprise Edition provides users unique certificates and keys to authenticate against + the Docker and Kubernetes APIs. Instructions on how to download these certificates and how to + configure kubectl to use them can be found in [CLI-based access.](cli.md#download-client-certificates) + +## Where to go next + +- [Deploy a workload to a Kubernetes cluster](../kubernetes.md) +- [Deploy to Kubernetes on Docker for Mac](/docker-for-mac/kubernetes.md) + diff --git a/datacenter/ucp/3.0/guides/user/interlock/architecture.md b/datacenter/ucp/3.0/guides/user/interlock/architecture.md new file mode 100644 index 0000000000..801fe38188 --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/interlock/architecture.md @@ -0,0 +1,73 @@ +--- +title: Interlock architecture +description: Learn more about the architecture of the layer 7 routing solution + for Docker swarm services. +keywords: routing, proxy +--- + +The layer 7 routing solution for swarm workloads is known as Interlock, and has +three components: + +* **Interlock-proxy**: This is a proxy/load-balancing service that handles the +requests from the outside world. By default this service is a containerized +NGINX deployment. +* **Interlock-extension**: This is a helper service that generates the +configuration used by the proxy service. +* **Interlock**: This is the central piece of the layer 7 routing solution. +It uses the Docker API to monitor events, and manages the extension and +proxy services. + +This is what the default configuration looks like, once you enable layer 7 +routing in UCP: + +![](../../images/interlock-architecture-1.svg) + +An Interlock service starts running on a manager node, an Interlock-extension +service starts running on a worker node, and two replicas of the +Interlock-proxy service run on worker nodes. + +If you don't have any worker nodes in your cluster, then all Interlock +components run on manager nodes. 
+
+## Deployment lifecycle
+
+By default, layer 7 routing is disabled, so an administrator first needs to
+enable this service from the UCP web UI.
+
+Once that happens:
+
+1. UCP creates the `ucp-interlock` overlay network.
+2. UCP deploys the `ucp-interlock` service and attaches it both to the Docker
+socket and the overlay network that was created. This allows the Interlock
+service to use the Docker API. That's also the reason why this service needs to
+run on a manager node.
+3. The `ucp-interlock` service starts the `ucp-interlock-extension` service
+and attaches it to the `ucp-interlock` network. This allows both services
+to communicate.
+4. The `ucp-interlock-extension` generates a configuration to be used by
+the proxy service. By default the proxy service is NGINX, so this service
+generates a standard NGINX configuration.
+5. The `ucp-interlock` service takes the proxy configuration and uses it to
+start the `ucp-interlock-proxy` service.
+
+At this point everything is ready for you to start using the layer 7 routing
+service with your swarm workloads.
+
+## Routing lifecycle
+
+Once the layer 7 routing service is enabled, you apply specific labels to
+your swarm services. The labels define the hostnames that are routed to the
+service, the ports used, and other routing configurations.
+
+Once you deploy or update a swarm service with those labels:
+
+1. The `ucp-interlock` service monitors the Docker API for events and
+publishes the events to the `ucp-interlock-extension` service.
+2. That service in turn generates a new configuration for the proxy service,
+based on the labels you've added to your services.
+3. The `ucp-interlock` service takes the new configuration and reconfigures the
+`ucp-interlock-proxy` to start using it.
+
+This all happens in milliseconds and with rolling updates. Even though
+services are being reconfigured, users won't notice it.
+ diff --git a/datacenter/ucp/3.0/guides/user/interlock/deploy/configuration-reference.md b/datacenter/ucp/3.0/guides/user/interlock/deploy/configuration-reference.md new file mode 100644 index 0000000000..ffdcfbf82b --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/interlock/deploy/configuration-reference.md @@ -0,0 +1,146 @@ +--- +title: Layer 7 routing configuration reference +description: Learn the configuration options for the UCP layer 7 routing solution +keywords: routing, proxy +--- + +Once you enable the layer 7 routing service, UCP creates the +`com.docker.ucp.interlock.conf-1` configuration and uses it to configure all +the internal components of this service. + +The configuration is managed as a TOML file. + +## Example configuration + +Here's an example of the default configuration used by UCP: + +```toml +ListenAddr = ":8080" +DockerURL = "unix:///var/run/docker.sock" +AllowInsecure = false +PollInterval = "3s" + +[Extensions] + [Extensions.default] + Image = "{{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version }}" + ServiceName = "ucp-interlock-extension" + Args = [] + Constraints = ["node.labels.com.docker.ucp.orchestrator.swarm==true", "node.platform.os==linux"] + ProxyImage = "{{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }}" + ProxyServiceName = "ucp-interlock-proxy" + ProxyConfigPath = "/etc/nginx/nginx.conf" + ProxyReplicas = 2 + ProxyStopSignal = "SIGQUIT" + ProxyStopGracePeriod = "5s" + ProxyConstraints = ["node.labels.com.docker.ucp.orchestrator.swarm==true", "node.platform.os==linux"] + PublishMode = "ingress" + PublishedPort = 80 + TargetPort = 80 + PublishedSSLPort = 8443 + TargetSSLPort = 443 + [Extensions.default.Labels] + "com.docker.ucp.InstanceID" = "fewho8k85kyc6iqypvvdh3ntm" + [Extensions.default.ContainerLabels] + "com.docker.ucp.InstanceID" = "fewho8k85kyc6iqypvvdh3ntm" + [Extensions.default.ProxyLabels] + "com.docker.ucp.InstanceID" = "fewho8k85kyc6iqypvvdh3ntm" + 
[Extensions.default.ProxyContainerLabels] + "com.docker.ucp.InstanceID" = "fewho8k85kyc6iqypvvdh3ntm" + [Extensions.default.Config] + Version = "" + User = "nginx" + PidPath = "/var/run/proxy.pid" + MaxConnections = 1024 + ConnectTimeout = 600 + SendTimeout = 600 + ReadTimeout = 600 + IPHash = false + AdminUser = "" + AdminPass = "" + SSLOpts = "" + SSLDefaultDHParam = 1024 + SSLDefaultDHParamPath = "" + SSLVerify = "required" + WorkerProcesses = 1 + RLimitNoFile = 65535 + SSLCiphers = "HIGH:!aNULL:!MD5" + SSLProtocols = "TLSv1.2" + AccessLogPath = "/dev/stdout" + ErrorLogPath = "/dev/stdout" + MainLogFormat = "'$remote_addr - $remote_user [$time_local] \"$request\" '\n\t\t '$status $body_bytes_sent \"$http_referer\" '\n\t\t '\"$http_user_agent\" \"$http_x_forwarded_for\"';" + TraceLogFormat = "'$remote_addr - $remote_user [$time_local] \"$request\" $status '\n\t\t '$body_bytes_sent \"$http_referer\" \"$http_user_agent\" '\n\t\t '\"$http_x_forwarded_for\" $request_id $msec $request_time '\n\t\t '$upstream_connect_time $upstream_header_time $upstream_response_time';" + KeepaliveTimeout = "75s" + ClientMaxBodySize = "32m" + ClientBodyBufferSize = "8k" + ClientHeaderBufferSize = "1k" + LargeClientHeaderBuffers = "4 8k" + ClientBodyTimeout = "60s" + UnderscoresInHeaders = false +``` + +## Core configurations + +These are the configurations used for the `ucp-interlock` service. The following +options are available: + +| Option | Type | Description | +|:-------------------|:------------|:-----------------------------------------------------------------------------------------------| +| `ListenAddr` | string | Address to serve the Interlock GRPC API. Defaults to `8080`. | +| `DockerURL` | string | Path to the socket or TCP address to the Docker API. Defaults to `unix:///var/run/docker.sock` | +| `TLSCACert` | string | Path to the CA certificate for connecting securely to the Docker API. 
| +| `TLSCert` | string | Path to the certificate for connecting securely to the Docker API. | +| `TLSKey` | string | Path to the key for connecting securely to the Docker API. | +| `AllowInsecure` | bool | Skip TLS verification when connecting to the Docker API via TLS. | +| `PollInterval` | string | Interval to poll the Docker API for changes. Defaults to `3s`. | +| `EndpointOverride` | string | Override the default GRPC API endpoint for extensions. The default is detected via Swarm. | +| `Extensions` | []Extension | Array of extensions as listed below. | + +## Extension configuration + +Interlock must contain at least one extension to service traffic. +The following options are available to configure the extensions: + +| Option | Type | Description | +|:-------------------|:------------------|:------------------------------------------------------------------------------| +| `Image` | string | Name of the Docker image to use for the extension service. | +| `Args` | []string | Arguments to be passed to the Docker extension service upon creation. | +| `Labels` | map[string]string | Labels to add to the extension service. | +| `ServiceName` | string | Name of the extension service. | +| `ProxyImage` | string | Name of the Docker image to use for the proxy service. | +| `ProxyArgs` | []string | Arguments to be passed to the proxy service upon creation. | +| `ProxyLabels` | map[string]string | Labels to add to the proxy service. | +| `ProxyServiceName` | string | Name of the proxy service. | +| `ProxyConfigPath` | string | Path in the service for the generated proxy configuration. | +| `ServiceCluster` | string | Name of the cluster this extension services. | +| `PublishMode` | string | Publish mode for the proxy service. Supported values are `ingress` or `host`. | +| `PublishedPort` | int | Port where the proxy service serves non-TLS traffic. | +| `PublishedSSLPort` | int | Port where the proxy service serves TLS traffic. 
| +| `Template` | string | Docker configuration object that is used as the extension template. | +| `Config` | Config | Proxy configuration used by the extensions as listed below. | + +## Proxy configuration + +By default NGINX is used as a proxy, so the following NGINX options are +available for the proxy service: + +| Option | Type | Description | +|:------------------------|:-------|:-----------------------------------------------------------------------------------------------------| +| `User` | string | User to be used in the proxy. | +| `PidPath` | string | Path to the pid file for the proxy service. | +| `MaxConnections` | int | Maximum number of connections for proxy service. | +| `ConnectTimeout` | int | Timeout in seconds for clients to connect. | +| `SendTimeout` | int | Timeout in seconds for the service to send a request to the proxied upstream. | +| `ReadTimeout` | int | Timeout in seconds for the service to read a response from the proxied upstream. | +| `IPHash` | bool | Specifies that requests are distributed between servers based on client IP addresses. | +| `SSLOpts` | string | Options to be passed when configuring SSL. | +| `SSLDefaultDHParam` | int | Size of DH parameters. | +| `SSLDefaultDHParamPath` | string | Path to DH parameters file. | +| `SSLVerify` | string | SSL client verification. | +| `WorkerProcesses` | string | Number of worker processes for the proxy service. | +| `RLimitNoFile` | int | Number of maxiumum open files for the proxy service. | +| `SSLCiphers` | string | SSL ciphers to use for the proxy service. | +| `SSLProtocols` | string | Enable the specified TLS protocols. | +| `AccessLogPath` | string | Path to use for access logs (default: `/dev/stdout`). | +| `ErrorLogPath` | string | Path to use for error logs (default: `/dev/stdout`). | +| `MainLogFormat` | string | [Format](http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format) to use for main logger. 
| +| `TraceLogFormat` | string | [Format](http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format) to use for trace logger. | diff --git a/datacenter/ucp/3.0/guides/user/interlock/deploy/configure.md b/datacenter/ucp/3.0/guides/user/interlock/deploy/configure.md new file mode 100644 index 0000000000..b0f9ef6b39 --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/interlock/deploy/configure.md @@ -0,0 +1,64 @@ +--- +title: Configure the layer 7 routing service +description: Learn how to configure the layer 7 routing solution for UCP, that allows + you to route traffic to swarm services. +keywords: routing, proxy +--- + +[When enabling the layer 7 routing solution](index.md) from the UCP web UI, +you can configure the ports for incoming traffic. If you want to further +customize the layer 7 routing solution, you can do it by updating the +`ucp-interlock` service with a new Docker configuration. + +Here's how it works: + +1. Find out what configuration is currently being used for the `ucp-interlock` +service and save it to a file: + + {% raw %} + ```bash + CURRENT_CONFIG_NAME=$(docker service inspect --format '{{ (index .Spec.TaskTemplate.ContainerSpec.Configs 0).ConfigName }}' ucp-interlock) + docker config inspect --format '{{ printf "%s" .Spec.Data }}' $CURRENT_CONFIG_NAME > config.toml + ``` + {% endraw %} + +2. Make the necessary changes to the `config.toml` file. + [Learn about the configuration options available](configuration-reference.md). + +3. Create a new Docker configuration object from the file you've edited: + + ```bash + NEW_CONFIG_NAME="com.docker.ucp.interlock.conf-$(( $(cut -d '-' -f 2 <<< "$CURRENT_CONFIG_NAME") + 1 ))" + docker config create $NEW_CONFIG_NAME config.toml + ``` + +3. 
Update the `ucp-interlock` service to start using the new configuration: + + ```bash + docker service update \ + --config-rm $CURRENT_CONFIG_NAME \ + --config-add source=$NEW_CONFIG_NAME,target=/config.toml \ + ucp-interlock + ``` + +By default the `ucp-interlock` service is configured to pause if you provide an +invalid configuration. The service won't restart without a manual intervention. + +If you want the service to automatically rollback to a previous stable +configuration, you can update it with: + +```bash +docker service update \ + --update-failure-action rollback \ + ucp-interlock +``` + +Another thing to be aware is that every time you enable the layer 7 routing +solution from the UCP UI, the `ucp-interlock` service is started using the +default configuration. + +If you've customized the configuration used by the `ucp-interlock` service, +you'll have to update it again to use the Docker configuration object +you've created. + + diff --git a/datacenter/ucp/3.0/guides/user/interlock/deploy/host-mode-networking.md b/datacenter/ucp/3.0/guides/user/interlock/deploy/host-mode-networking.md new file mode 100644 index 0000000000..ed7e922d20 --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/interlock/deploy/host-mode-networking.md @@ -0,0 +1,100 @@ +--- +title: Host mode networking +description: Learn how to configure the UCP layer 7 routing solution with + host mode networking. +keywords: routing, proxy +redirect_from: + - /ee/ucp/interlock/usage/host-mode-networking/ +--- + +By default the layer 7 routing components communicate with one another using +overlay networks. You can customize the components to use host mode networking +instead. + +You can choose to: + +* Configure the `ucp-interlock` and `ucp-interlock-extension` services to +communicate using host mode networking. +* Configure the `ucp-interlock-proxy` and your swarm service to communicate +using host mode networking. +* Use host mode networking for all of the components. 
+
+In this example we'll start with a production-grade deployment of the layer
+7 routing solution and update it so that it uses host mode networking instead of
+overlay networking.
+
+When using host mode networking you won't be able to use DNS service discovery,
+since that functionality requires overlay networking.
+For two services to communicate, each service needs to know the IP address of
+the node where the other service is running.
+
+## Production-grade deployment
+
+If you haven't already, configure the
+[layer 7 routing solution for production](production.md).
+
+Once you've done that, the `ucp-interlock-proxy` service replicas should be
+running on their own dedicated nodes.
+
+## Update the ucp-interlock config
+
+[Update the ucp-interlock service configuration](configure.md) so that it uses
+host mode networking.
+
+Update the `PublishMode` key to:
+
+```toml
+PublishMode = "host"
+```
+
+When updating the `ucp-interlock` service to use the new Docker configuration,
+make sure to update it so that it starts publishing its port on the host:
+
+```bash
+docker service update \
+  --config-rm $CURRENT_CONFIG_NAME \
+  --config-add source=$NEW_CONFIG_NAME,target=/config.toml \
+  --publish-add mode=host,target=8080 \
+  ucp-interlock
+```
+
+The `ucp-interlock` and `ucp-interlock-extension` services are now communicating
+using host mode networking.
+
+## Deploy your swarm services
+
+Now you can deploy your swarm services. In this example we'll deploy a demo
+service that also uses host mode networking.
+Set up your CLI client with a [UCP client bundle](../../user-access/cli.md),
+and deploy the service:
+
+```bash
+docker service create \
+  --name demo \
+  --detach=false \
+  --label com.docker.lb.hosts=app.example.org \
+  --label com.docker.lb.port=8080 \
+  --publish mode=host,target=8080 \
+  --env METADATA="demo" \
+  ehazlett/docker-demo
+```
+
+Docker allocates a high random port on the host where the service can be reached.
+To test that everything is working you can run:
+
+```bash
+curl --header "Host: app.example.org" \
+  http://<proxy-address>:<routing-http-port>/ping
+```
+
+Where:
+
+* `<proxy-address>` is the domain name or IP address of a node where the proxy
+service is running.
+* `<routing-http-port>` is the [port you're using to route HTTP traffic](index.md).
+
+If everything is working correctly, you should get a JSON result like:
+
+```json
+{"instance":"63b855978452", "version":"0.1", "request_id":"d641430be9496937f2669ce6963b67d6"}
+```
diff --git a/datacenter/ucp/3.0/guides/user/interlock/deploy/index.md b/datacenter/ucp/3.0/guides/user/interlock/deploy/index.md
new file mode 100644
index 0000000000..720d15fc67
--- /dev/null
+++ b/datacenter/ucp/3.0/guides/user/interlock/deploy/index.md
@@ -0,0 +1,18 @@
+---
+title: Enable layer 7 routing
+description: Learn how to enable the layer 7 routing solution for UCP, that allows
+  you to route traffic to swarm services.
+keywords: routing, proxy
+---
+
+To enable support for layer 7 routing, also known as HTTP routing mesh,
+log in to the UCP web UI as an administrator, navigate to the **Admin Settings**
+page, and click the **Routing Mesh** option. Check the **Enable routing mesh** option.
+
+![http routing mesh](../../../images/interlock-install-3.png){: .with-border}
+
+By default, the routing mesh service listens on port 80 for HTTP and port
+8443 for HTTPS. Change the ports if you already have services that are using
+them.
+
+Once you save, the layer 7 routing service can be used by your swarm services.
diff --git a/datacenter/ucp/3.0/guides/user/interlock/deploy/production.md b/datacenter/ucp/3.0/guides/user/interlock/deploy/production.md
new file mode 100644
index 0000000000..24d8a0ef10
--- /dev/null
+++ b/datacenter/ucp/3.0/guides/user/interlock/deploy/production.md
@@ -0,0 +1,89 @@
+---
+title: Configure layer 7 routing for production
+description: Learn how to configure the layer 7 routing solution for a production
+  environment.
+keywords: routing, proxy
+---
+
+The layer 7 solution that ships out of the box with UCP is highly available
+and fault tolerant. It is also designed to work independently of how many
+nodes you're managing with UCP.
+
+![production deployment](../../../images/interlock-deploy-production-1.svg)
+
+For a production-grade deployment, you should tune the default deployment to
+have two nodes dedicated for running the two replicas of the
+`ucp-interlock-proxy` service. This ensures:
+
+* The proxy services have dedicated resources to handle user requests. You
+can configure these nodes with higher performance network interfaces.
+* No application traffic can be routed to a manager node. This makes your
+deployment secure.
+* The proxy service is running on two nodes. If one node fails, layer 7 routing
+continues working.
+
+To achieve this you need to:
+
+1. Enable layer 7 routing. [Learn how](index.md).
+2. Pick two nodes that are going to be dedicated to run the proxy service.
+3. Apply labels to those nodes, so that you can constrain the proxy service to
+only run on nodes with those labels.
+4. Update the `ucp-interlock` service to deploy proxies using that constraint.
+5. Configure your load balancer to route traffic to the dedicated nodes only.
+
+## Apply labels to nodes
+
+In this example, we chose node-5 and node-6 to be dedicated just for running
+the proxy service. To apply labels to those nodes run:
+
+```bash
+docker node update --label-add nodetype=loadbalancer <node>
+```
+
+To make sure the label was successfully applied, run:
+
+{% raw %}
+```bash
+docker node inspect --format '{{ index .Spec.Labels "nodetype" }}' <node>
+```
+{% endraw %}
+
+The command should print "loadbalancer".
+
+## Configure the ucp-interlock service
+
+Now that your nodes are labelled, you need to update the `ucp-interlock`
+service configuration to deploy the proxy service with the correct constraints.
+ +Add another constraint to the `ProxyConstraints` array: + +```toml +[Extensions] + [Extensions.default] + ProxyConstraints = ["node.labels.com.docker.ucp.orchestrator.swarm==true", "node.platform.os==linux", "node.labels.nodetype==loadbalancer"] +``` + +[Learn how to configure ucp-interlock](configure.md). + +> Known issue +> +> In UCP 3.0.0 the `ucp-interlock` service won't redeploy the proxy replicas +> when you update the configuration. As a workaround, +> [deploy a demo service](../usage/index.md). Once you do that, the proxy +services are redeployed and scheduled on the correct nodes. +{: .important} + +Once you reconfigure the `ucp-interlock` service, you can check if the proxy +service is running on the dedicated nodes: + +```bash +docker service ps ucp-interlock-proxy +``` + +## Configure your load balancer + +Once the proxy service is running on dedicated nodes, configure your upstream +load balancer with the domain names or IP addresses of those nodes. + +This makes sure all traffic is directed to these nodes. + diff --git a/datacenter/ucp/3.0/guides/user/interlock/index.md b/datacenter/ucp/3.0/guides/user/interlock/index.md new file mode 100644 index 0000000000..2deef44542 --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/interlock/index.md @@ -0,0 +1,52 @@ +--- +title: Layer 7 routing overview +description: Learn how to route layer 7 traffic to your swarm services +keywords: routing, proxy +--- + +Docker Engine running in swarm mode has a routing mesh, which makes it easy +to expose your services to the outside world. Since all nodes participate +in the routing mesh, users can access your service by contacting any node. + +![swarm routing mess](../../images/interlock-overview-1.svg) + +In this example the WordPress service is listening on port 8000 of the routing +mesh. Even though the service is running on a single node, users can access +WordPress using the domain name or IP of any of the nodes that are part of +the swarm. 
+
+UCP extends this one step further with layer 7 routing (also known as
+application layer routing), allowing users to access Docker services using domain names
+instead of IP addresses.
+
+This functionality is made available through the Interlock component.
+
+![layer 7 routing](../../images/interlock-overview-2.svg)
+
+In this example, users can access the WordPress service using
+`http://wordpress.example.org`. Interlock takes care of routing traffic to
+the right place.
+
+Interlock is specific to the Swarm orchestrator. If you're trying to route
+traffic to your Kubernetes applications, check
+[layer 7 routing with Kubernetes](../kubernetes/layer-7-routing.md).
+
+## Features and benefits
+
+Layer 7 routing in UCP supports:
+
+* **High availability**: All the components used for layer 7 routing leverage
+Docker swarm for high availability, and handle failures gracefully.
+* **Automatic configuration**: UCP monitors your services and automatically
+reconfigures the proxy services so that everything is handled for you.
+* **Scalability**: You can customize and tune the proxy services that handle
+user-facing requests to meet whatever demand your services have.
+* **TLS**: You can leverage Docker secrets to securely manage TLS certificates
+and keys for your services. Both TLS termination and TCP passthrough are supported.
+* **Context-based routing**: You can define where to route the request based on
+context or path.
+* **Host mode networking**: By default layer 7 routing leverages the Docker Swarm
+routing mesh, but you don't have to. You can use host mode networking for maximum
+performance.
+* **Security**: The layer 7 routing components that are exposed to the outside
+world run on worker nodes. Even if they get compromised, your cluster won't.
diff --git a/datacenter/ucp/3.0/guides/user/interlock/upgrade.md b/datacenter/ucp/3.0/guides/user/interlock/upgrade.md
new file mode 100644
index 0000000000..426b8e499b
--- /dev/null
+++ b/datacenter/ucp/3.0/guides/user/interlock/upgrade.md
@@ -0,0 +1,129 @@
+---
+title: Layer 7 routing upgrade
+description: Learn how to route layer 7 traffic to your swarm services
+keywords: routing, proxy, hrm
+---
+
+The [HTTP routing mesh](/datacenter/ucp/2.2/guides/admin/configure/use-domain-names-to-access-services.md)
+functionality was redesigned in UCP 3.0 for greater security and flexibility.
+The functionality was also renamed to "layer 7 routing", to make it easier for
+new users to get started.
+
+[Learn about the new layer 7 routing functionality](index.md).
+
+To route traffic to your service you apply specific labels to your swarm
+services, describing the hostname for the service and other configurations.
+Things work in the same way as they did with the HTTP routing mesh, with the
+only difference being that you use different labels.
+
+You don't have to manually update your services. During the upgrade process to
+3.0, UCP updates the services to start using new labels.
+
+This article describes the upgrade process for the routing component, so that
+you can troubleshoot UCP and your services, in case something goes wrong with
+the upgrade.
+
+## UCP upgrade process
+
+If you are using the HTTP routing mesh, and start an upgrade to UCP 3.0:
+
+1. UCP starts a reconciliation process to ensure all internal components are
+deployed. As part of this, services using HRM labels are inspected.
+2. UCP creates the `com.docker.ucp.interlock.conf-1` configuration based on HRM configurations.
+3. The HRM service is removed.
+4. The `ucp-interlock` service is deployed with the configuration created.
+5. The `ucp-interlock` service deploys the `ucp-interlock-extension` and
+`ucp-interlock-proxy` services.
+ +The only way to rollback from an upgrade is by restoring from a backup taken +before the upgrade. If something goes wrong during the upgrade process, you +need to troubleshoot the interlock services and your services, since the HRM +service won't be running after the upgrade. + +[Learn more about the interlock services and architecture](architecture.md). + +## Check that routing works + +After upgrading to UCP 3.0, you should check if all swarm services are still +routable. + +For services using HTTP: + +```bash +curl -vs http://:/ -H "Host: " +``` + +For services using HTTPS: + +```bash +curl -vs https://: +``` + +After the upgrade, check that you can still use the same hostnames to access +the swarm services. + +## The ucp-interlock services are not running + +After the upgrade to UCP 3.0, the following services should be running: + +* `ucp-interlock`: monitors swarm workloads configured to use layer 7 routing. +* `ucp-interlock-extension`: Helper service that generates the configuration for +the `ucp-interlock-proxy` service. +* `ucp-interlock-proxy`: A service that provides load balancing and proxying for +swarm workloads. + +To check if these services are running, use a client bundle with administrator +permissions and run: + +```bash +docker ps --filter "name=ucp-interlock" +``` + +* If the `ucp-interlock` service doesn't exist or is not running, something went +wrong with the reconciliation step. +* If this still doesn't work, it's possible that UCP is having problems creating +the `com.docker.ucp.interlock.conf-1`, due to name conflicts. Make sure you +don't have any configuration with the same name by running: + ``` + docker config ls --filter "name=com.docker.ucp.interlock" + ``` +* If either the `ucp-interlock-extension` or `ucp-interlock-proxy` services are +not running, it's possible that there are port conflicts. +As a workaround re-enable the layer 7 routing configuration from the +[UCP settings page](deploy/index.md). 
Make sure the ports you choose are not +being used by other services. + +## Workarounds and clean-up + +If you have any of the problems above, disable and enable the layer 7 routing +setting on the [UCP settings page](deploy/index.md). This redeploys the +services with their default configuration. + +When doing that make sure you specify the same ports you were using for HRM, +and that no other services are listening on those ports. + +You should also check if the `ucp-hrm` service is running. If it is, you should +stop it since it can conflict with the `ucp-interlock-proxy` service. + +## Optionally remove labels + +As part of the upgrade process UCP adds the +[labels specific to the new layer 7 routing solution](usage/labels-reference.md). + +You can update your services to remove the old HRM labels, since they won't be +used anymore. + +## Optionally segregate control traffic + +Interlock is designed so that all the control traffic is kept separate from +the application traffic. + +If before upgrading you had all your applications attached to the `ucp-hrm` +network, after upgrading you can update your services to start using a +dedicated network for routing that's not shared with other services. +[Learn how to use a dedicated network](usage/index.md). + +If before upgrading you had a dedicate network to route traffic to each service, +Interlock will continue using those dedicated networks. However the +`ucp-interlock` will be attached to each of those networks. You can update +the `ucp-interlock` service so that it is only connected to the `ucp-hrm` network. 
diff --git a/datacenter/ucp/3.0/guides/user/interlock/usage/canary.md b/datacenter/ucp/3.0/guides/user/interlock/usage/canary.md new file mode 100644 index 0000000000..138dc5816b --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/interlock/usage/canary.md @@ -0,0 +1,107 @@ +--- +title: Canary application instances +description: Learn how to do canary deployments for your Docker swarm services +keywords: routing, proxy +--- + +In this example we will publish a service and deploy an updated service as canary instances. + +First we will create an overlay network so that service traffic is isolated and secure: + +```bash +$> docker network create -d overlay demo +1se1glh749q1i4pw0kf26mfx5 +``` + +Next we will create the initial service: + +```bash +$> docker service create \ + --name demo-v1 \ + --network demo \ + --detach=false \ + --replicas=4 \ + --label com.docker.lb.hosts=demo.local \ + --label com.docker.lb.port=8080 \ + --env METADATA="demo-version-1" \ + ehazlett/docker-demo +``` + +Interlock will detect once the service is available and publish it. Once the tasks are running +and the proxy service has been updated the application should be available via `http://demo.local`: + +```bash +$> curl -vs -H "Host: demo.local" http://127.0.0.1/ping +* Trying 127.0.0.1... 
+* TCP_NODELAY set +* Connected to demo.local (127.0.0.1) port 80 (#0) +> GET /ping HTTP/1.1 +> Host: demo.local +> User-Agent: curl/7.54.0 +> Accept: */* +> +< HTTP/1.1 200 OK +< Server: nginx/1.13.6 +< Date: Wed, 08 Nov 2017 20:28:26 GMT +< Content-Type: text/plain; charset=utf-8 +< Content-Length: 120 +< Connection: keep-alive +< Set-Cookie: session=1510172906715624280; Path=/; Expires=Thu, 09 Nov 2017 20:28:26 GMT; Max-Age=86400 +< x-request-id: f884cf37e8331612b8e7630ad0ee4e0d +< x-proxy-id: 5ad7c31f9f00 +< x-server-info: interlock/2.0.0-development (147ff2b1) linux/amd64 +< x-upstream-addr: 10.0.2.4:8080 +< x-upstream-response-time: 1510172906.714 +< +{"instance":"df20f55fc943","version":"0.1","metadata":"demo-version-1","request_id":"f884cf37e8331612b8e7630ad0ee4e0d"} +``` + +Notice the `metadata` with `demo-version-1`. + +Now we will deploy a "new" version: + +```bash +$> docker service create \ + --name demo-v2 \ + --network demo \ + --detach=false \ + --label com.docker.lb.hosts=demo.local \ + --label com.docker.lb.port=8080 \ + --env METADATA="demo-version-2" \ + --env VERSION="0.2" \ + ehazlett/docker-demo +``` + +Since this has a replica of one (1) and the initial version has four (4) replicas 20% of application traffic +will be sent to `demo-version-2`: + +```bash +$> curl -vs -H "Host: demo.local" http://127.0.0.1/ping +{"instance":"23d9a5ec47ef","version":"0.1","metadata":"demo-version-1","request_id":"060c609a3ab4b7d9462233488826791c"} +$> curl -vs -H "Host: demo.local" http://127.0.0.1/ping +{"instance":"f42f7f0a30f9","version":"0.1","metadata":"demo-version-1","request_id":"c848e978e10d4785ac8584347952b963"} +$> curl -vs -H "Host: demo.local" http://127.0.0.1/ping +{"instance":"c2a686ae5694","version":"0.1","metadata":"demo-version-1","request_id":"724c21d0fb9d7e265821b3c95ed08b61"} +$> curl -vs -H "Host: demo.local" http://127.0.0.1/ping 
+{"instance":"1b0d55ed3d2f","version":"0.2","metadata":"demo-version-2","request_id":"b86ff1476842e801bf20a1b5f96cf94e"} +$> curl -vs -H "Host: demo.local" http://127.0.0.1/ping +{"instance":"c2a686ae5694","version":"0.1","metadata":"demo-version-1","request_id":"724c21d0fb9d7e265821b3c95ed08b61"} +``` + +To increase traffic to the new version add more replicas with `docker service scale`: + +```bash +$> docker service scale demo-v2=4 +demo-v2 +``` + +To complete the upgrade, scale the `demo-v1` service to zero (0): + +```bash +$> docker service scale demo-v1=0 +demo-v1 +``` + +This will route all application traffic to the new version. If you need to roll back, simply scale the v1 service +back up and v2 down. + diff --git a/datacenter/ucp/3.0/guides/user/interlock/usage/context.md b/datacenter/ucp/3.0/guides/user/interlock/usage/context.md new file mode 100644 index 0000000000..a8f4daa5ec --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/interlock/usage/context.md @@ -0,0 +1,65 @@ +--- +title: Context/path based routing +description: Learn how to route traffic to your Docker swarm services based + on a url path +keywords: routing, proxy +--- + +In this example we will publish a service using context or path based routing. + +First we will create an overlay network so that service traffic is isolated and secure: + +```bash +$> docker network create -d overlay demo +1se1glh749q1i4pw0kf26mfx5 +``` + +Next we will create the initial service: + +```bash +$> docker service create \ + --name demo \ + --network demo \ + --detach=false \ + --label com.docker.lb.hosts=demo.local \ + --label com.docker.lb.port=8080 \ + --label com.docker.lb.context_root=/app \ + --label com.docker.lb.context_root_rewrite=true \ + --env METADATA="demo-context-root" \ + ehazlett/docker-demo +``` + +> Only one path per host +> +> Interlock supports only one path per host per service cluster.
Once a +> particular `com.docker.lb.hosts` label has been applied, it cannot be applied +> again in the same service cluster. +{: .important} + +Interlock will detect once the service is available and publish it. Once the tasks are running +and the proxy service has been updated the application should be available via `http://demo.local`: + +```bash +$> curl -vs -H "Host: demo.local" http://127.0.0.1/app/ +* Trying 127.0.0.1... +* TCP_NODELAY set +* Connected to 127.0.0.1 (127.0.0.1) port 80 (#0) +> GET /app/ HTTP/1.1 +> Host: demo.local +> User-Agent: curl/7.54.0 +> Accept: */* +> +< HTTP/1.1 200 OK +< Server: nginx/1.13.6 +< Date: Fri, 17 Nov 2017 14:25:17 GMT +< Content-Type: text/html; charset=utf-8 +< Transfer-Encoding: chunked +< Connection: keep-alive +< x-request-id: 077d18b67831519defca158e6f009f82 +< x-proxy-id: 77c0c37d2c46 +< x-server-info: interlock/2.0.0-dev (732c77e7) linux/amd64 +< x-upstream-addr: 10.0.1.3:8080 +< x-upstream-response-time: 1510928717.306 +... +``` + diff --git a/datacenter/ucp/3.0/guides/user/interlock/usage/default-service.md b/datacenter/ucp/3.0/guides/user/interlock/usage/default-service.md new file mode 100644 index 0000000000..fffe86cd8a --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/interlock/usage/default-service.md @@ -0,0 +1,50 @@ +--- +title: Set a default service +description: Learn about Interlock, an application routing and load balancing system + for Docker Swarm. +keywords: ucp, interlock, load balancing +--- + +The default proxy service used by UCP to provide layer 7 routing is NGINX, +so when users try to access a route that hasn't been configured, they will +see the default NGINX 404 page. + +![Default NGINX page](../../../images/interlock-default-service-1.png){: .with-border} + +You can customize this by labelling a service with +`com.docker.lb.default_backend=true`. When users try to access a route that's +not configured, they are redirected to this service.
+ +As an example, create a `docker-compose.yml` file with: + +```yaml +version: "3.2" + +services: + demo: + image: ehazlett/interlock-default-app + deploy: + replicas: 1 + labels: + com.docker.lb.default_backend: "true" + com.docker.lb.port: 80 + networks: + - demo-network + +networks: + demo-network: + driver: overlay +``` + +Set up your CLI client with a [UCP client bundle](../../user-access/cli.md), +and deploy the service: + +```bash +docker stack deploy --compose-file docker-compose.yml demo +``` + +Once users try to access a route that's not configured, they are directed +to this demo service. + +![Custom default page](../../../images/interlock-default-service-2.png){: .with-border} + diff --git a/datacenter/ucp/3.0/guides/user/interlock/usage/index.md b/datacenter/ucp/3.0/guides/user/interlock/usage/index.md new file mode 100644 index 0000000000..82f0922cf3 --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/interlock/usage/index.md @@ -0,0 +1,95 @@ +--- +title: Route traffic to a simple swarm service +description: Learn how to do canary deployments for your Docker swarm services +keywords: routing, proxy +--- + +Once the [layer 7 routing solution is enabled](../deploy/index.md), you can +start using it in your swarm services. + +In this example we'll deploy a simple service which: + +* Has a JSON endpoint that returns the ID of the task serving the request. +* Has a web UI that shows how many tasks the service is running. +* Can be reached at `http://app.example.org`. + +## Deploy the service + +Create a `docker-compose.yml` file with: + +```yaml +version: "3.2" + +services: + demo: + image: ehazlett/docker-demo + deploy: + replicas: 1 + labels: + com.docker.lb.hosts: app.example.org + com.docker.lb.network: demo-network + com.docker.lb.port: 8080 + networks: + - demo-network + +networks: + demo-network: + driver: overlay +``` + +Note that: + +* The `com.docker.lb.hosts` label defines the hostname for the service. 
When +the layer 7 routing solution gets a request containing `app.example.org` in +the host header, that request is forwarded to the demo service. +* The `com.docker.lb.network` defines which network the `ucp-interlock-proxy` +should attach to in order to be able to communicate with the demo service. +To use layer 7 routing, your services need to be attached to at least one network. +If your service is only attached to a single network, you don't need to add +a label to specify which network to use for routing. +* The `com.docker.lb.port` label specifies which port the `ucp-interlock-proxy` +service should use to communicate with this demo service. +* Your service doesn't need to expose a port in the swarm routing mesh. All +communications are done using the network you've specified. + +Set up your CLI client with a [UCP client bundle](../../user-access/cli.md), +and deploy the service: + +```bash +docker stack deploy --compose-file docker-compose.yml demo +``` + +The `ucp-interlock` service detects that your service is using these labels +and automatically reconfigures the `ucp-interlock-proxy` service. + +## Test using the CLI + +To test that requests are routed to the demo service, run: + +```bash +curl --header "Host: app.example.org" \ + http://:/ping +``` + +Where: + +* `` is the domain name or IP address of a UCP node. +* `` is the [port you're using to route HTTP traffic](../deploy/index.md). + +If everything is working correctly, you should get a JSON result like: + +```json +{"instance":"63b855978452", "version":"0.1", "request_id":"d641430be9496937f2669ce6963b67d6"} +``` + +## Test using a browser + +Since the demo service exposes an HTTP endpoint, you can also use your browser +to validate that everything is working. + +Make sure the `/etc/hosts` file in your system has an entry mapping +`app.example.org` to the IP address of a UCP node. Once you do that, you'll be +able to start using the service from your browser. 
+ +![browser](../../../images/route-simple-app-1.png){: .with-border } + diff --git a/datacenter/ucp/3.0/guides/user/interlock/usage/interlock_service_clusters.png b/datacenter/ucp/3.0/guides/user/interlock/usage/interlock_service_clusters.png new file mode 100644 index 0000000000..84ad5f1898 Binary files /dev/null and b/datacenter/ucp/3.0/guides/user/interlock/usage/interlock_service_clusters.png differ diff --git a/datacenter/ucp/3.0/guides/user/interlock/usage/labels-reference.md b/datacenter/ucp/3.0/guides/user/interlock/usage/labels-reference.md new file mode 100644 index 0000000000..263c055286 --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/interlock/usage/labels-reference.md @@ -0,0 +1,31 @@ +--- +title: Layer 7 routing labels reference +description: Learn about the labels you can use in your swarm services to route + layer 7 traffic to them. +keywords: routing, proxy +--- + +Once the layer 7 routing solution is enabled, you can +[start using it in your swarm services](index.md). + +The following labels are available for you to use in swarm services: + + +| Label | Description | Example | +|:---------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------| +| `com.docker.lb.hosts` | Comma separated list of the hosts that the service should serve. | `example.com,test.com` | +| `com.docker.lb.port` | Port to use for internal upstream communication. | `8080` | +| `com.docker.lb.network` | Name of network the proxy service should attach to for upstream connectivity. | `app-network-a` | +| `com.docker.lb.context_root` | Context or path to use for the application. | `/app` | +| `com.docker.lb.context_root_rewrite` | Boolean to enable rewrite for the context root. | `true` | +| `com.docker.lb.ssl_only` | Boolean to force SSL for application. 
| `true` | +| `com.docker.lb.ssl_cert` | Docker secret to use for the SSL certificate. | `example.com.cert` | +| `com.docker.lb.ssl_key` | Docker secret to use for the SSL key. | `example.com.key` | +| `com.docker.lb.websocket_endpoints` | Comma separated list of endpoints to configure to be upgraded for websockets. | `/ws,/foo` | +| `com.docker.lb.service_cluster` | Name of the service cluster to use for the application. | `us-east` | +| `com.docker.lb.ssl_backend` | Enable SSL communication to the upstreams. | `true` | +| `com.docker.lb.ssl_backend_tls_verify` | Verification mode for the upstream TLS. | `none` | +| `com.docker.lb.sticky_session_cookie` | Cookie to use for sticky sessions. | `none` | +| `com.docker.lb.redirects` | Semi-colon separated list of redirects to add in the format of `,`. Example: `http://old.example.com,http://new.example.com;` | `none` | +| `com.docker.lb.ssl_passthrough` | Enable SSL passthrough. | `false` | + diff --git a/datacenter/ucp/3.0/guides/user/interlock/usage/redirects.md b/datacenter/ucp/3.0/guides/user/interlock/usage/redirects.md new file mode 100644 index 0000000000..0f060b7a3c --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/interlock/usage/redirects.md @@ -0,0 +1,69 @@ +--- +title: Application redirects +description: Learn how to implement redirects using swarm services and the + layer 7 routing solution for UCP. +keywords: routing, proxy, redirects +--- + +Once the [layer 7 routing solution is enabled](../deploy/index.md), you can +start using it in your swarm services. In this example we'll deploy a simple +service that can be reached at `app.example.org`. We'll also redirect +requests to `old.example.org` to that service. 
+ +To do that, create a docker-compose.yml file with: + +```yaml +version: "3.2" + +services: + demo: + image: ehazlett/docker-demo + deploy: + replicas: 1 + labels: + com.docker.lb.hosts: app.example.org,old.example.org + com.docker.lb.network: demo-network + com.docker.lb.port: 8080 + com.docker.lb.redirects: http://old.example.org,http://app.example.org + networks: + - demo-network + +networks: + demo-network: + driver: overlay +``` + +Note that the demo service has labels to signal that traffic for both +`app.example.org` and `old.example.org` should be routed to this service. +There's also a label indicating that all traffic directed to `old.example.org` +should be redirected to `app.example.org`. + +Set up your CLI client with a [UCP client bundle](../../user-access/cli.md), +and deploy the service: + +```bash +docker stack deploy --compose-file docker-compose.yml demo +``` + +You can also use the CLI to test if the redirect is working, by running: + +```bash +curl --head --header "Host: old.example.org" http://: +``` + +You should see something like: + +```none +HTTP/1.1 302 Moved Temporarily +Server: nginx/1.13.8 +Date: Thu, 29 Mar 2018 23:16:46 GMT +Content-Type: text/html +Content-Length: 161 +Connection: keep-alive +Location: http://app.example.org/ +``` + +You can also test that the redirect works from your browser. For that, you +need to make sure you add entries for both `app.example.org` and +`old.example.org` to your `/etc/hosts` file, mapping them to the IP address +of a UCP node. diff --git a/datacenter/ucp/3.0/guides/user/interlock/usage/service-clusters.md b/datacenter/ucp/3.0/guides/user/interlock/usage/service-clusters.md new file mode 100644 index 0000000000..c2d1f2ce9d --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/interlock/usage/service-clusters.md @@ -0,0 +1,200 @@ +--- +title: Service clusters +description: Learn about Interlock, an application routing and load balancing system + for Docker Swarm. 
+keywords: ucp, interlock, load balancing +--- + +In this example we will configure an eight (8) node Swarm cluster that uses service clusters +to route traffic to different proxies. There are three (3) managers +and five (5) workers. Two of the workers are configured with node labels to be dedicated +ingress cluster load balancer nodes. These will receive all application traffic. + +This example will not cover the actual deployment of infrastructure. +It assumes you have a vanilla Swarm cluster (`docker swarm init` and `docker swarm join` from the nodes). +See the [Swarm](https://docs.docker.com/engine/swarm/) documentation if you need help +getting a Swarm cluster deployed. + +![Interlock Service Clusters](interlock_service_clusters.png) + +We will configure the load balancer worker nodes (`lb-00` and `lb-01`) with node labels in order to pin the Interlock Proxy +service. Once you are logged into one of the Swarm managers run the following to add node labels +to the dedicated ingress workers: + +```bash +$> docker node update --label-add nodetype=loadbalancer --label-add region=us-east lb-00 +lb-00 +$> docker node update --label-add nodetype=loadbalancer --label-add region=us-west lb-01 +lb-01 +``` + +You can inspect each node to ensure the labels were successfully added: + +```bash +{% raw %} +$> docker node inspect -f '{{ .Spec.Labels }}' lb-00 +map[nodetype:loadbalancer region:us-east] +$> docker node inspect -f '{{ .Spec.Labels }}' lb-01 +map[nodetype:loadbalancer region:us-west] +{% endraw %} +``` + +Next, we will create a configuration object for Interlock that contains multiple extensions with varying service clusters: + +```bash +$> cat << EOF | docker config create service.interlock.conf - +ListenAddr = ":8080" +DockerURL = "unix:///var/run/docker.sock" +PollInterval = "3s" + +[Extensions] + [Extensions.us-east] + Image = "{{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version }}" + Args = ["-D"] + ServiceName = "interlock-ext-us-east" + ProxyImage = 
"{{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }}" + ProxyArgs = [] + ProxyServiceName = "interlock-proxy-us-east" + ProxyConfigPath = "/etc/nginx/nginx.conf" + ServiceCluster = "us-east" + PublishMode = "host" + PublishedPort = 80 + TargetPort = 80 + PublishedSSLPort = 443 + TargetSSLPort = 443 + [Extensions.us-east.Config] + User = "nginx" + PidPath = "/var/run/proxy.pid" + WorkerProcesses = 1 + RlimitNoFile = 65535 + MaxConnections = 2048 + [Extensions.us-east.Labels] + ext_region = "us-east" + [Extensions.us-east.ProxyLabels] + proxy_region = "us-east" + + [Extensions.us-west] + Image = "{{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version }}" + Args = ["-D"] + ServiceName = "interlock-ext-us-west" + ProxyImage = "{{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }}" + ProxyArgs = [] + ProxyServiceName = "interlock-proxy-us-west" + ProxyConfigPath = "/etc/nginx/nginx.conf" + ServiceCluster = "us-west" + PublishMode = "host" + PublishedPort = 80 + TargetPort = 80 + PublishedSSLPort = 443 + TargetSSLPort = 443 + [Extensions.us-west.Config] + User = "nginx" + PidPath = "/var/run/proxy.pid" + WorkerProcesses = 1 + RlimitNoFile = 65535 + MaxConnections = 2048 + [Extensions.us-west.Labels] + ext_region = "us-west" + [Extensions.us-west.ProxyLabels] + proxy_region = "us-west" +EOF +oqkvv1asncf6p2axhx41vylgt +``` +Note that we are using "host" mode networking in order to use the same ports (`80` and `443`) in the cluster. We cannot use ingress +networking as it reserves the port across all nodes. If you want to use ingress networking you will have to use different ports +for each service cluster. 
+ +Next we will create a dedicated network for Interlock and the extensions: + +```bash +$> docker network create -d overlay interlock +``` + +Now we can create the Interlock service: + +```bash +$> docker service create \ + --name interlock \ + --mount src=/var/run/docker.sock,dst=/var/run/docker.sock,type=bind \ + --network interlock \ + --constraint node.role==manager \ + --config src=service.interlock.conf,target=/config.toml \ + {{ page.ucp_org }}/ucp-interlock:{{ page.ucp_version }} -D run -c /config.toml +sjpgq7h621exno6svdnsvpv9z +``` + +## Configure Proxy Services +Once we have the node labels we can re-configure the Interlock Proxy services to be constrained to the +workers for each region. Again, from a manager run the following to pin the proxy services to the ingress workers: + +```bash +$> docker service update \ + --constraint-add node.labels.nodetype==loadbalancer \ + --constraint-add node.labels.region==us-east \ + interlock-proxy-us-east +$> docker service update \ + --constraint-add node.labels.nodetype==loadbalancer \ + --constraint-add node.labels.region==us-west \ + interlock-proxy-us-west +``` + +We are now ready to deploy applications.
First we will create individual networks for each application: + +```bash +$> docker network create -d overlay demo-east +$> docker network create -d overlay demo-west +``` + +Next we will deploy the application in the `us-east` service cluster: + +```bash +$> docker service create \ + --name demo-east \ + --network demo-east \ + --detach=true \ + --label com.docker.lb.hosts=demo-east.local \ + --label com.docker.lb.port=8080 \ + --label com.docker.lb.service_cluster=us-east \ + --env METADATA="us-east" \ + ehazlett/docker-demo +``` + +Now we deploy the application in the `us-west` service cluster: + +```bash +$> docker service create \ + --name demo-west \ + --network demo-west \ + --detach=true \ + --label com.docker.lb.hosts=demo-west.local \ + --label com.docker.lb.port=8080 \ + --label com.docker.lb.service_cluster=us-west \ + --env METADATA="us-west" \ + ehazlett/docker-demo +``` + +Only the service cluster that is designated will be configured for the applications. For example, the `us-east` service cluster +will not be configured to serve traffic for the `us-west` service cluster and vice versa. We can see this in action when we +send requests to each service cluster. + +When we send a request to the `us-east` service cluster it only knows about the `us-east` application (be sure to ssh to the `lb-00` node): + +```bash +{% raw %} +$> curl -H "Host: demo-east.local" http://$(docker node inspect -f '{{ .Status.Addr }}' lb-00)/ping +{"instance":"1b2d71619592","version":"0.1","metadata":"us-east","request_id":"3d57404cf90112eee861f9d7955d044b"} +$> curl -H "Host: demo-west.local" http://$(docker node inspect -f '{{ .Status.Addr }}' lb-00)/ping + +404 Not Found + +

    404 Not Found

    +
    nginx/1.13.6
    + + +{% endraw %} +``` + +Application traffic is isolated to each service cluster. Interlock also ensures that a proxy will only be updated if it has corresponding updates +to its designated service cluster. So in this example, updates to the `us-east` cluster will not affect the `us-west` cluster. If there is a problem +the others will not be affected. + diff --git a/datacenter/ucp/3.0/guides/user/interlock/usage/sessions.md b/datacenter/ucp/3.0/guides/user/interlock/usage/sessions.md new file mode 100644 index 0000000000..f1104ec486 --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/interlock/usage/sessions.md @@ -0,0 +1,131 @@ +--- +title: Persistent (sticky) sessions +description: Learn how to configure your swarm services with persistent sessions + using UCP. +keywords: routing, proxy +--- + +In this example we will publish a service and configure the proxy for persistent (sticky) sessions. + +# Cookies +In the following example we will show how to configure sticky sessions using cookies. + +First we will create an overlay network so that service traffic is isolated and secure: + +```bash +$> docker network create -d overlay demo +1se1glh749q1i4pw0kf26mfx5 +``` + +Next we will create the service with the cookie to use for sticky sessions: + +```bash +$> docker service create \ + --name demo \ + --network demo \ + --detach=false \ + --replicas=5 \ + --label com.docker.lb.hosts=demo.local \ + --label com.docker.lb.sticky_session_cookie=session \ + --label com.docker.lb.port=8080 \ + --env METADATA="demo-sticky" \ + ehazlett/docker-demo +``` + +Interlock will detect once the service is available and publish it. Once the tasks are running +and the proxy service has been updated the application should be available via `http://demo.local` +and configured to use sticky sessions: + +```bash +$> curl -vs -c cookie.txt -b cookie.txt -H "Host: demo.local" http://127.0.0.1/ping +* Trying 127.0.0.1... 
+* TCP_NODELAY set +* Connected to 127.0.0.1 (127.0.0.1) port 80 (#0) +> GET /ping HTTP/1.1 +> Host: demo.local +> User-Agent: curl/7.54.0 +> Accept: */* +> Cookie: session=1510171444496686286 +> +< HTTP/1.1 200 OK +< Server: nginx/1.13.6 +< Date: Wed, 08 Nov 2017 20:04:36 GMT +< Content-Type: text/plain; charset=utf-8 +< Content-Length: 117 +< Connection: keep-alive +* Replaced cookie session="1510171444496686286" for domain demo.local, path /, expire 0 +< Set-Cookie: session=1510171444496686286 +< x-request-id: 3014728b429320f786728401a83246b8 +< x-proxy-id: eae36bf0a3dc +< x-server-info: interlock/2.0.0-development (147ff2b1) linux/amd64 +< x-upstream-addr: 10.0.2.5:8080 +< x-upstream-response-time: 1510171476.948 +< +{"instance":"9c67a943ffce","version":"0.1","metadata":"demo-sticky","request_id":"3014728b429320f786728401a83246b8"} +``` + +Notice the `Set-Cookie` from the application. This is stored by the `curl` command and sent with subsequent requests +which are pinned to the same instance. If you make a few requests you will notice the same `x-upstream-addr`. + +# IP Hashing +In this example we show how to configure sticky sessions using client IP hashing. This is not as flexible or consistent +as cookies but enables workarounds for some applications that cannot use the other method. + +First we will create an overlay network so that service traffic is isolated and secure: + +```bash +$> docker network create -d overlay demo +1se1glh749q1i4pw0kf26mfx5 +``` + +Next we will create the service with the cookie to use for sticky sessions using IP hashing: + +```bash +$> docker service create \ + --name demo \ + --network demo \ + --detach=false \ + --replicas=5 \ + --label com.docker.lb.hosts=demo.local \ + --label com.docker.lb.port=8080 \ + --label com.docker.lb.ip_hash=true \ + --env METADATA="demo-sticky" \ + ehazlett/docker-demo +``` + +Interlock will detect once the service is available and publish it. 
Once the tasks are running +and the proxy service has been updated the application should be available via `http://demo.local` +and configured to use sticky sessions: + +```bash +$> curl -vs -H "Host: demo.local" http://127.0.0.1/ping +* Trying 127.0.0.1... +* TCP_NODELAY set +* Connected to 127.0.0.1 (127.0.0.1) port 80 (#0) +> GET /ping HTTP/1.1 +> Host: demo.local +> User-Agent: curl/7.54.0 +> Accept: */* +> +< HTTP/1.1 200 OK +< Server: nginx/1.13.6 +< Date: Wed, 08 Nov 2017 20:04:36 GMT +< Content-Type: text/plain; charset=utf-8 +< Content-Length: 117 +< Connection: keep-alive +< x-request-id: 3014728b429320f786728401a83246b8 +< x-proxy-id: eae36bf0a3dc +< x-server-info: interlock/2.0.0-development (147ff2b1) linux/amd64 +< x-upstream-addr: 10.0.2.5:8080 +< x-upstream-response-time: 1510171476.948 +< +{"instance":"9c67a943ffce","version":"0.1","metadata":"demo-sticky","request_id":"3014728b429320f786728401a83246b8"} +``` + +You can use `docker service scale demo=10` to add some more replicas. Once scaled, you will notice that requests are pinned +to a specific backend. + +Note: due to the way the IP hashing works for extensions, you will notice a new upstream address when scaling replicas. This is +expected as internally the proxy uses the new set of replicas to decide on a backend on which to pin. Once the upstreams are +determined a new "sticky" backend will be chosen and that will be the dedicated upstream. + diff --git a/datacenter/ucp/3.0/guides/user/interlock/usage/tls.md b/datacenter/ucp/3.0/guides/user/interlock/usage/tls.md new file mode 100644 index 0000000000..9e619f97a1 --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/interlock/usage/tls.md @@ -0,0 +1,195 @@ +--- +title: Applications with SSL +description: Learn how to configure your swarm services with TLS using the layer + 7 routing solution for UCP. 
+keywords: routing, proxy, tls +--- + +Once the [layer 7 routing solution is enabled](../deploy/index.md), you can +start using it in your swarm services. You have two options for securing your +services with TLS: + +* Let the proxy terminate the TLS connection. All traffic between end-users and +the proxy is encrypted, but the traffic going between the proxy and your swarm +service is not secured. +* Let your swarm service terminate the TLS connection. The end-to-end traffic +is encrypted and the proxy service allows TLS traffic to passthrough unchanged. + +In this example we'll deploy a service that can be reached at `app.example.org` +using these two options. + +No matter how you choose to secure your swarm services, there are two steps to +route traffic with TLS: + +1. Create [Docker secrets](/engine/swarm/secrets.md) to manage from a central +place the private key and certificate used for TLS. +2. Add labels to your swarm service for UCP to reconfigure the proxy service. + + +## Let the proxy handle TLS + +In this example we'll deploy a swarm service and let the proxy service handle +the TLS connection. All traffic between the proxy and the swarm service is +not secured, so you should only use this option if you trust that no one can +monitor traffic inside services running on your datacenter. + +![TLS Termination](../../../images/interlock-tls-1.png) + +Start by getting a private key and certificate for the TLS connection. Make +sure the Common Name in the certificate matches the name where your service +is going to be available. 
+ +You can generate a self-signed certificate for `app.example.org` by running: + +```bash +openssl req \ + -new \ + -newkey rsa:4096 \ + -days 3650 \ + -nodes \ + -x509 \ + -subj "/C=US/ST=CA/L=SF/O=Docker-demo/CN=app.example.org" \ + -keyout app.example.org.key \ + -out app.example.org.cert +``` + +Then, create a docker-compose.yml file with the following content: + +```yml +version: "3.2" + +services: + demo: + image: ehazlett/docker-demo + deploy: + replicas: 1 + labels: + com.docker.lb.hosts: app.example.org + com.docker.lb.network: demo-network + com.docker.lb.port: 8080 + com.docker.lb.ssl_cert: demo_app.example.org.cert + com.docker.lb.ssl_key: demo_app.example.org.key + environment: + METADATA: proxy-handles-tls + networks: + - demo-network + +networks: + demo-network: + driver: overlay +secrets: + app.example.org.cert: + file: ./app.example.org.cert + app.example.org.key: + file: ./app.example.org.key +``` + +Notice that the demo service has labels describing that the proxy service should +route traffic to `app.example.org` to this service. All traffic between the +service and proxy takes place using the `demo-network` network. The service also +has labels describing the Docker secrets to use on the proxy service to terminate +the TLS connection. + +Since the private key and certificate are stored as Docker secrets, you can +easily scale the number of replicas used for running the proxy service. Docker +takes care of distributing the secrets to the replicas. + +Set up your CLI client with a [UCP client bundle](../../user-access/cli.md), +and deploy the service: + +```bash +docker stack deploy --compose-file docker-compose.yml demo +``` + +The service is now running. To test that everything is working correctly you +first need to update your `/etc/hosts` file to map `app.example.org` to the +IP address of a UCP node. 
+ +In a production deployment, you'll have to create a DNS entry so that your +users can access the service using the domain name of your choice. +After doing that, you'll be able to access your service at: + +```bash +https://<hostname>:<https-port> +``` + +Where: +* `hostname` is the name you used with the `com.docker.lb.hosts` label. +* `https-port` is the port you've configured in the [UCP settings](../deploy/index.md). + +![Browser screenshot](../../../images/interlock-tls-2.png){: .with-border} + +Since we're using self-signed certificates in this example, client tools like +browsers display a warning that the connection is insecure. + +You can also test from the CLI: + +```bash +curl --insecure \ + --resolve <hostname>:<https-port>:<ucp-ip-address> \ + https://<hostname>:<https-port>/ping +``` + +If everything is properly configured you should get a JSON payload: + +```json +{"instance":"f537436efb04","version":"0.1","request_id":"5a6a0488b20a73801aa89940b6f8c5d2"} +``` + +Since the proxy uses SNI to decide where to route traffic, make sure you're +using a version of curl that includes the SNI header with insecure requests. +If this doesn't happen, curl displays an error saying that the SSL handshake +was aborted. + + +## Let your service handle TLS + +You can also encrypt the traffic from end-users to your swarm service.
+ +![End-to-end encryption](../../../images/interlock-tls-3.png) + + +To do that, deploy your swarm service using the following docker-compose.yml file: + +```yml +version: "3.2" + +services: + demo: + image: ehazlett/docker-demo + command: --tls-cert=/run/secrets/cert.pem --tls-key=/run/secrets/key.pem + deploy: + replicas: 1 + labels: + com.docker.lb.hosts: app.example.org + com.docker.lb.network: demo-network + com.docker.lb.port: 8080 + com.docker.lb.ssl_passthrough: "true" + environment: + METADATA: end-to-end-TLS + networks: + - demo-network + secrets: + - source: app.example.org.cert + target: /run/secrets/cert.pem + - source: app.example.org.key + target: /run/secrets/key.pem + +networks: + demo-network: + driver: overlay +secrets: + app.example.org.cert: + file: ./app.example.org.cert + app.example.org.key: + file: ./app.example.org.key +``` + +Notice that we've update the service to start using the secrets with the +private key and certificate. The service is also labeled with +`com.docker.lb.ssl_passthrough: true`, signaling UCP to configure the proxy +service such that TLS traffic for `app.example.org` is passed to the service. + +Since the connection is fully encrypt from end-to-end, the proxy service +won't be able to add metadata such as version info or request ID to the +response headers. diff --git a/datacenter/ucp/3.0/guides/user/interlock/usage/websockets.md b/datacenter/ucp/3.0/guides/user/interlock/usage/websockets.md new file mode 100644 index 0000000000..ec2b1b46b5 --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/interlock/usage/websockets.md @@ -0,0 +1,36 @@ +--- +title: Websockets +description: Learn how to use websocket in your swarm services when using the + layer 7 routing solution for UCP. +keywords: routing, proxy +--- + +In this example we will publish a service and configure support for websockets. 
+ +First we will create an overlay network so that service traffic is isolated and secure: + +```bash +$> docker network create -d overlay demo +1se1glh749q1i4pw0kf26mfx5 +``` + +Next we will create the service with websocket endpoints: + +```bash +$> docker service create \ + --name demo \ + --network demo \ + --detach=false \ + --label com.docker.lb.hosts=demo.local \ + --label com.docker.lb.port=8080 \ + --label com.docker.lb.websocket_endpoints=/ws \ + ehazlett/websocket-chat +``` + +Note: for this to work you must have an entry for `demo.local` in your local hosts (i.e. `/etc/hosts`) file. +This uses the browser for websocket communication so you will need to have an entry or use a routable domain. + +Interlock will detect once the service is available and publish it. Once the tasks are running +and the proxy service has been updated the application should be available via `http://demo.local`. Open +two instances of your browser and you should see text on both instances as you type. + diff --git a/datacenter/ucp/3.0/guides/user/kubernetes/create-service-account.md b/datacenter/ucp/3.0/guides/user/kubernetes/create-service-account.md new file mode 100644 index 0000000000..3e7336fd62 --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/kubernetes/create-service-account.md @@ -0,0 +1,89 @@ +--- +title: Create a service account for a Kubernetes app +description: Learn how to use a service account to give a Kubernetes workload access to cluster resources. +keywords: UCP, Docker EE, Kubernetes, authorization, access control, grant +--- + +Kubernetes enables access control for workloads by providing service accounts. +A service account represents an identity for processes that run in a pod. +When a process is authenticated through a service account, it can contact the +API server and access cluster resources. If a pod doesn't have an assigned +service account, it gets the `default` service account. 
+Learn about [managing service accounts](https://v1-8.docs.kubernetes.io/docs/admin/service-accounts-admin/). + +In Docker EE, you give a service account access to cluster resources by +creating a grant, the same way that you would give access to a user or a team. +Learn how to [grant access to cluster resources](../authorization/index.md). + +In this example, you create a service account and a grant that could be used +for an NGINX server. + +## Create the Kubernetes namespace + +A Kubernetes user account is global, but a service account is scoped to a +namespace, so you need to create a namespace before you create a service +account. + +1. Navigate to the **Namespaces** page and click **Create**. +2. In the **Object YAML** editor, append the following text. + ```yaml + metadata: + name: nginx + ``` +3. Click **Create**. +4. In the **nginx** namespace, click the **More options** icon, + and in the context menu, select **Set Context**, and click **Confirm**. + + ![](../images/create-service-account-1.png){: .with-border} + +5. Click the **Set context for all namespaces** toggle and click **Confirm**. + +## Create a service account + +Create a service account named `nginx-service-account` in the `nginx` +namespace. + +1. Navigate to the **Service Accounts** page and click **Create**. +2. In the **Namespace** dropdown, select **nginx**. +3. In the **Object YAML** editor, paste the following text. + ```yaml + apiVersion: v1 + kind: ServiceAccount + metadata: + name: nginx-service-account + ``` +3. Click **Create**. + + ![](../images/create-service-account-2.png){: .with-border} + +## Create a grant + +To give the service account access to cluster resources, create a grant with +`Restricted Control` permissions. + +1. Navigate to the **Grants** page and click **Create Grant**. +2. In the left pane, click **Resource Sets**, and in the **Type** section, + click **Namespaces**. +3. Select the **nginx** namespace. +4. In the left pane, click **Roles**. 
In the **Role** dropdown, select + **Restricted Control**. +5. In the left pane, click **Subjects**, and select **Service Account**. + + > Service account subject type + > + > The **Service Account** option in the **Subject Type** section appears only + > when a Kubernetes namespace is present. + {: .important} + +6. In the **Namespace** dropdown, select **nginx**, and in the + **Service Account** dropdown, select **nginx-service-account**. +7. Click **Create**. + + ![](../images/create-service-account-3.png){: .with-border} + +Now `nginx-service-account` has access to all cluster resources that are +assigned to the `nginx` namespace. + +## Where to go next + +- [Deploy an ingress controller for a Kubernetes app](deploy-ingress-controller.md) \ No newline at end of file diff --git a/datacenter/ucp/3.0/guides/user/kubernetes/deploy-with-compose.md b/datacenter/ucp/3.0/guides/user/kubernetes/deploy-with-compose.md new file mode 100644 index 0000000000..64172cc844 --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/kubernetes/deploy-with-compose.md @@ -0,0 +1,92 @@ +--- +title: Deploy a Compose-based app to a Kubernetes cluster +description: Use Docker Enterprise Edition to deploy a Kubernetes workload from a Docker compose. +keywords: UCP, Docker EE, Kubernetes, Compose +redirect_from: + - /ee/ucp/user/services/deploy-compose-on-kubernetes/ +--- + +Docker Enterprise Edition enables deploying [Docker Compose](/compose/overview.md/) +files to Kubernetes clusters. Starting in Compose file version 3.3, you use the +same `docker-compose.yml` file that you use for Swarm deployments, but you +specify **Kubernetes workloads** when you deploy the stack. The result is a +true Kubernetes app. + +## Get access to a Kubernetes namespace + +To deploy a stack to Kubernetes, you need a namespace for the app's resources. +Contact your Docker EE administrator to get access to a namespace. In this +example, the namespace has the name `lab-words`. 
+ +[Learn to grant access to a Kubernetes namespace](../authorization/grant-permissions/#kubernetes-grants). + +## Create a Kubernetes app from a Compose file + +In this example, you create a simple app, named "lab-words", by using a Compose +file. The following yaml defines the stack: + +```yaml +version: '3.3' + +services: + web: + build: web + image: dockerdemos/lab-web + volumes: + - "./web/static:/static" + ports: + - "80:80" + + words: + build: words + image: dockerdemos/lab-words + deploy: + replicas: 5 + endpoint_mode: dnsrr + resources: + limits: + memory: 16M + reservations: + memory: 16M + + db: + build: db + image: dockerdemos/lab-db +``` + +1. Open the UCP web UI, and in the left pane, click **Shared resources**. +2. Click **Stacks**, and in the **Stacks** page, click **Create stack**. +3. In the **Name** textbox, type "lab-words". +4. In the **Mode** dropdown, select **Kubernetes workloads**. +5. In the **Namespace** dropdown, select **lab-words**. +6. In the **docker-compose.yml** editor, paste the previous YAML. +7. Click **Create** to deploy the stack. + +## Inspect the deployment + +After a few minutes have passed, all of the pods in the `lab-words` deployment +are running. + +1. In the left pane, click **Pods**. Confirm that there are seven pods and + that their status is **Running**. If any have a status of **Pending**, + wait until they're all running. +2. Click one of the pods that has a name starting with **words**, and in the + details pane, scroll down to the **Pod IP** to view the pod's internal IP + address. + + ![](../images/deploy-compose-kubernetes-1.png){: .with-border} + +3. In the left pane, click **Load balancers** and find the **web-published** service. +4. Click the **web-published** service, and in the details pane, scroll down to the + **Spec** section. +5. Under **Ports**, click the URL to open the web UI for the `lab-words` app. + + ![](../images/deploy-compose-kubernetes-2.png){: .with-border} + +6. 
Look at the IP addresses that are displayed in each tile. The IP address + of the pod you inspected previously may be listed. If it's not, refresh the + page until you see it. + + ![](../images/deploy-compose-kubernetes-3.png){: .with-border} + +7. Refresh the page to see how the load is balanced across the pods. + diff --git a/datacenter/ucp/3.0/guides/user/kubernetes/index.md b/datacenter/ucp/3.0/guides/user/kubernetes/index.md new file mode 100644 index 0000000000..3daebde71d --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/kubernetes/index.md @@ -0,0 +1,258 @@ +--- +title: Deploy a workload to a Kubernetes cluster +description: Use Docker Enterprise Edition to deploy Kubernetes workloads from yaml files. +keywords: UCP, Docker EE, orchestration, Kubernetes, cluster +redirect_from: + - /ee/ucp/user/services/deploy-kubernetes-workload/ +--- + +The Docker EE web UI enables deploying your Kubernetes YAML files. In most +cases, no modifications are necessary to deploy on a cluster that's managed by +Docker EE. + +## Deploy an NGINX server + +In this example, a simple Kubernetes Deployment object for an NGINX server is +defined in YAML: + +```yaml +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + replicas: 2 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.7.9 + ports: + - containerPort: 80 +``` + +The YAML specifies an earlier version of NGINX, which will be updated in a +later section. + +1. Open the Docker EE web UI, and in the left pane, click **Kubernetes**. +2. Click **Create** to open the **Create Kubernetes Object** page. +3. In the **Namespace** dropdown, select **default**. +4. In the **Object YAML** editor, paste the previous YAML. +5. Click **Create**. 
+ +![](../images/deploy-kubernetes-workload-1.png){: .with-border} + +## Inspect the deployment + +The Docker EE web UI shows the status of your deployment when you click the +links in the **Kubernetes** section of the left pane. + +1. In the left pane, click **Controllers** to see the resource controllers + that Docker EE created for the NGINX server. +2. Click the **nginx-deployment** controller, and in the details pane, scroll + to the **Template** section. This shows the values that Docker EE used to + create the deployment. +3. In the left pane, click **Pods** to see the pods that are provisioned for + the NGINX server. Click one of the pods, and in the details pane, scroll to + the **Status** section to see that pod's phase, IP address, and other + properties. + +![](../images/deploy-kubernetes-workload-2.png){: .with-border} + +## Expose the server + +The NGINX server is up and running, but it's not accessible from outside of the +cluster. Add a `NodePort` service to expose the server on a specified port: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: nginx + labels: + app: nginx +spec: + type: NodePort + ports: + - port: 80 + nodePort: 32768 + selector: + app: nginx +``` + +The service connects the cluster's internal port 80 to the external port +32768. + +1. Repeat the previous steps and copy-paste the YAML that defines the `nginx` + service into the **Object YAML** editor on the + **Create Kubernetes Object** page. When you click **Create**, the + **Load Balancers** page opens. +2. Click the **nginx** service, and in the details pane, find the **Ports** + section. + + ![](../images/deploy-kubernetes-workload-3.png){: .with-border} + +3. Click the link that's labeled **URL** to view the default NGINX page. + +The YAML definition connects the service to the NGINX server by using the +app label `nginx` and a corresponding label selector. 
+[Learn about using a service to expose your app](https://v1-8.docs.kubernetes.io/docs/tutorials/kubernetes-basics/expose-intro/). + +## Update the deployment + +Update an existing deployment by applying an updated YAML file. In this +example, the server is scaled up to four replicas and updated to a later +version of NGINX. + +```yaml +... +spec: + progressDeadlineSeconds: 600 + replicas: 4 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: nginx + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: nginx + spec: + containers: + - image: nginx:1.8 +... +``` + +1. In the left pane, click **Controllers** and select **nginx-deployment**. +2. In the details pane, click **Configure**, and in the **Edit Deployment** + page, find the **replicas: 2** entry. +3. Change the number of replicas to 4, so the line reads **replicas: 4**. +4. Find the **image: nginx:1.7.9** entry and change it to **image: nginx:1.8**. + + ![](../images/deploy-kubernetes-workload-4.png){: .with-border} + +5. Click **Save** to update the deployment with the new YAML. +6. In the left pane, click **Pods** to view the newly created replicas. + + ![](../images/deploy-kubernetes-workload-5.png){: .with-border} + +## Use the CLI to deploy Kubernetes objects + +With Docker EE, you deploy your Kubernetes objects on the command line by using +`kubectl`. [Install and set up kubectl](https://v1-8.docs.kubernetes.io/docs/tasks/tools/install-kubectl/). + +Use a client bundle to configure your client tools, like Docker CLI and `kubectl` +to communicate with UCP instead of the local deployments you might have running. +[Get your client bundle by using the Docker EE web UI or the command line](../user-access/cli.md). + +When you have the client bundle set up, you can deploy a Kubernetes object +from YAML. 
+ +```yaml +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + replicas: 2 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.7.9 + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx + labels: + app: nginx +spec: + type: NodePort + ports: + - port: 80 + nodePort: 32768 + selector: + app: nginx +``` + +Save the previous YAML to a file named "deployment.yaml", and use the following +command to deploy the NGINX server: + +```bash +kubectl apply -f deployment.yaml +``` + +## Inspect the deployment + +Use the `describe deployment` option to inspect the deployment: + +```bash +kubectl describe deployment nginx-deployment +``` + +Also, you can use the Docker EE web UI to see the deployment's pods and +controllers. + +## Update the deployment + +Update an existing deployment by applying an updated YAML file. + +Edit deployment.yaml and change the following lines: + +- Increase the number of replicas to 4, so the line reads **replicas: 4**. +- Update the NGINX version by specifying **image: nginx:1.8**. 
+ +Save the edited YAML to a file named "update.yaml", and use the following +command to deploy the NGINX server: + +```bash +kubectl apply -f update.yaml +``` + +Check that the deployment was scaled out by listing the deployments in the +cluster: + +```bash + kubectl get deployments +``` + +You should see four pods in the deployment: + +```bash +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +nginx-deployment 4 4 4 4 2d +``` + +Check that the pods are running the updated image: + +```bash +kubectl describe deployment nginx-deployment | grep -i image +``` + +You should see the currently running image: + +```bash + Image: nginx:1.8 +``` + diff --git a/datacenter/ucp/3.0/guides/user/kubernetes/install-cni-plugin.md b/datacenter/ucp/3.0/guides/user/kubernetes/install-cni-plugin.md new file mode 100644 index 0000000000..b16cf194d8 --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/kubernetes/install-cni-plugin.md @@ -0,0 +1,93 @@ +--- +title: Install a CNI plugin +description: Learn how to install a Container Networking Interface plugin on Docker Universal Control Plane. +keywords: ucp, cli, administration, kubectl, Kubernetes, cni, Container Networking Interface, flannel, weave, ipip, calico +--- + +With Docker Universal Control Plane, you can install a third-party Container +Networking Interface (CNI) plugin when you install UCP, by using the +`--cni-installer-url` option. By default, Docker EE installs the built-in +[Calico](https://github.com/projectcalico/cni-plugin) plugin, but you can +override the default and install a plugin of your choice, +like [Flannel](https://github.com/coreos/flannel) or +[Weave](https://www.weave.works/). 
+ +# Install UCP with a custom CNI plugin + +Modify the [UCP install command-line](../admin/install/index.md#step-4-install-ucp) +to add the `--cni-installer-url` [option](/reference/ucp/3.0/cli/install.md), +providing a URL for the location of the CNI plugin's YAML file: + +```bash +docker container run --rm -it --name ucp \ + -v /var/run/docker.sock:/var/run/docker.sock \ + {{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} install \ + --host-address \ + --cni-installer-url \ + --interactive +``` + +You must provide a correct YAML installation file for the CNI plugin, but most +of the default files work on Docker EE with no modification. + +## YAML files for CNI plugins + +Use the following commands to get the YAML files for popular CNI plugins. + +- [Flannel](https://github.com/coreos/flannel) + ```bash + # Get the URL for the Flannel CNI plugin. + CNI_URL="https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml" + ``` +- [Weave](https://www.weave.works/) + ```bash + # Get the URL for the Weave CNI plugin. 
+ CNI_URL="https://cloud.weave.works/k8s/net?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI5IiwgR2l0VmVyc2lvbjoidjEuOS4zIiwgR2l0Q29tbWl0OiJkMjgzNTQxNjU0NGYyOThjOTE5ZTJlYWQzYmUzZDA4NjRiNTIzMjNiIiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxOC0wMi0wN1QxMjoyMjoyMVoiLCBHb1ZlcnNpb246ImdvMS45LjIiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjgrIiwgR2l0VmVyc2lvbjoidjEuOC4yLWRvY2tlci4xNDMrYWYwODAwNzk1OWUyY2UiLCBHaXRDb21taXQ6ImFmMDgwMDc5NTllMmNlYWUxMTZiMDk4ZWNhYTYyNGI0YjI0MjBkODgiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE4LTAyLTAxVDIzOjI2OjE3WiIsIEdvVmVyc2lvbjoiZ28xLjguMyIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==" + ``` + If you have kubectl available, for example by using + [Docker for Mac](/docker-for-mac/kubernetes.md), you can use the following + command to get the URL for the [Weave](https://www.weave.works/) CNI plugin: + ```bash + # Get the URL for the Weave CNI plugin. + CNI_URL="https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" + ``` +- [Romana](http://docs.romana.io/) + ```bash + # Get the URL for the Romana CNI plugin. + CNI_URL="https://raw.githubusercontent.com/romana/romana/master/docs/kubernetes/romana-kubeadm.yml" + ``` + +## Disable IP in IP overlay tunneling + +The Calico CNI plugin supports both overlay (IPIP) and underlay forwarding +technologies. By default, Docker UCP uses IPIP overlay tunneling. + +If you're used to managing applications at the network level through the +underlay visibility, or you want to reuse existing networking tools in the +underlay, you may want to disable the IPIP functionality. Run the following +commands on the Kubernetes master node to disable IPIP overlay tunneling. + +```bash +# Exec into the Calico Kubernetes controller container. 
+docker exec -it $(docker ps --filter name=k8s_calico-kube-controllers_calico-kube-controllers -q) sh + +# Download calicoctl +wget https://github.com/projectcalico/calicoctl/releases/download/v3.1.1/calicoctl + +# Get the IP pool configuration. +./calicoctl get ippool -o yaml > ippool.yaml + +# Edit the file: Disable IPIP in ippool.yaml by setting "ipipMode: Never". + +# Apply the edited file to the Calico plugin. +./calicoctl apply -f ippool.yaml + +``` + +These steps disable overlay tunneling, and Calico uses the underlay networking, +in environments where it's supported. + +## Where to go next + +- [Install UCP for production](../admin/install.md) +- [Deploy a workload to a Kubernetes cluster](../kubernetes.md) diff --git a/datacenter/ucp/3.0/guides/user/kubernetes/layer-7-routing.md b/datacenter/ucp/3.0/guides/user/kubernetes/layer-7-routing.md new file mode 100644 index 0000000000..c1d343e0b2 --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/kubernetes/layer-7-routing.md @@ -0,0 +1,310 @@ +--- +title: Layer 7 routing +description: Learn how to route traffic to your Kubernetes workloads in + Docker Enterprise Edition. +keywords: UCP, Kubernetes, ingress, routing +redirect_from: + - /ee/ucp/kubernetes/deploy-ingress-controller/ +--- + +When you deploy a Kubernetes application, you may want to make it accessible +to users using hostnames instead of IP addresses. + +Kubernetes provides **ingress controllers** for this. This functionality is +specific to Kubernetes. If you're trying to route traffic to Swarm-based +applications, check [layer 7 routing with Swarm](../interlock/index.md). + +Use an ingress controller when you want to: + +* Give your Kubernetes app an externally-reachable URL. +* Load-balance traffic to your app. + +Kubernetes provides an NGINX ingress controller that you can use in Docker EE +without modifications. +Learn about [ingress in Kubernetes](https://v1-8.docs.kubernetes.io/docs/concepts/services-networking/ingress/). 
+ +## Create a dedicated namespace + +1. Navigate to the **Namespaces** page, and click **Create**. +2. In the **Object YAML** editor, append the following text. + ```yaml + metadata: + name: ingress-nginx + ``` + + The finished YAML should look like this. + + ```yaml + apiVersion: v1 + kind: Namespace + metadata: + name: ingress-nginx + ``` +3. Click **Create**. +4. In the **ingress-nginx** namespace, click the **More options** icon, + and in the context menu, select **Set Context**. + + ![](../images/deploy-ingress-controller-1.png){: .with-border} + +## Create a grant + +The default service account that's associated with the `ingress-nginx` +namespace needs access to Kubernetes resources, so create a grant with +`Restricted Control` permissions. + +1. From UCP, navigate to the **Grants** page, and click **Create Grant**. +2. Within the **Subject** pane, select **Service Account**. For the + **Namespace** select **ingress-nginx**, and select **default** for + the **Service Account**. Click **Next**. +3. Within the **Role** pane, select **Restricted Control**, and then click + **Next**. +4. Within the **Resource Set** pane, select the **Type** **Namespace**, and + select the **Apply grant to all existing and new namespaces** toggle. +5. Click **Create**. + +> Ingress and role-based access control +> +> Docker EE has an access control system that differs from Kubernetes RBAC. +> If your ingress controller has access control requirements, you need to +> create corresponding UCP grants. Learn to +> [migrate Kubernetes roles to Docker EE authorization](../authorization/migrate-kubernetes-roles.md). +{: .important} + +## Deploy NGINX ingress controller + +The cluster is ready for the ingress controller deployment, which has three +main components: + +- a simple HTTP server, named `default-http-backend`, +- an ingress controller, named `nginx-ingress-controller`, and +- a service that exposes the app, named `ingress-nginx`. 
+ +Navigate to the **Create Kubernetes Object** page, and in the **Object YAML** +editor, paste the following YAML. + +```yaml +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: default-http-backend + labels: + app: default-http-backend + namespace: ingress-nginx +spec: + replicas: 1 + selector: + matchLabels: + app: default-http-backend + template: + metadata: + labels: + app: default-http-backend + annotations: + seccomp.security.alpha.kubernetes.io/pod: docker/default + spec: + terminationGracePeriodSeconds: 60 + containers: + - name: default-http-backend + # Any image is permissable as long as: + # 1. It serves a 404 page at / + # 2. It serves 200 on a /healthz endpoint + image: gcr.io/google_containers/defaultbackend:1.4 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + ports: + - containerPort: 8080 + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: default-http-backend + namespace: ingress-nginx + labels: + app: default-http-backend +spec: + ports: + - port: 80 + targetPort: 8080 + selector: + app: default-http-backend +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: nginx-configuration + namespace: ingress-nginx + labels: + app: ingress-nginx +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tcp-services + namespace: ingress-nginx +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: udp-services + namespace: ingress-nginx +--- +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: nginx-ingress-controller + namespace: ingress-nginx +spec: + replicas: 1 + selector: + matchLabels: + app: ingress-nginx + template: + metadata: + labels: + app: ingress-nginx + annotations: + prometheus.io/port: '10254' + prometheus.io/scrape: 'true' + seccomp.security.alpha.kubernetes.io/pod: docker/default + spec: + initContainers: + - command: + - sh + - -c + - sysctl -w 
net.core.somaxconn=32768; sysctl -w net.ipv4.ip_local_port_range="1024 65535" + image: alpine:3.6 + imagePullPolicy: IfNotPresent + name: sysctl + securityContext: + privileged: true + containers: + - name: nginx-ingress-controller + image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.17.1 + args: + - /nginx-ingress-controller + - --default-backend-service=$(POD_NAMESPACE)/default-http-backend + - --configmap=$(POD_NAMESPACE)/nginx-configuration + - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services + - --udp-services-configmap=$(POD_NAMESPACE)/udp-services + - --annotations-prefix=nginx.ingress.kubernetes.io + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + ports: + - name: http + containerPort: 80 + - name: https + containerPort: 443 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: ingress-nginx + namespace: ingress-nginx +spec: + type: NodePort + ports: + - name: http + port: 80 + targetPort: 80 + protocol: TCP + - name: https + port: 443 + targetPort: 443 + protocol: TCP + selector: + app: ingress-nginx +``` + +## Check your deployment + +The `default-http-backend` provides a simple service that serves a 404 page +at `/` and serves 200 on the `/healthz` endpoint. + +1. Navigate to the **Controllers** page and confirm that the + **default-http-backend** and **nginx-ingress-controller** objects are + scheduled. + + > Scheduling latency + > + > It may take several seconds for the HTTP backend and the ingress controller's + > `Deployment` and `ReplicaSet` objects to be scheduled. 
+ {: .important} + + ![](../images/deploy-ingress-controller-2.png){: .with-border} + +2. When the workload is running, navigate to the **Load Balancers** page + and click the **ingress-nginx** service. + + ![](../images/deploy-ingress-controller-3.png){: .with-border} + +3. In the details pane, click the first URL in the **Ports** section. + + A new page opens, displaying `default backend - 404`. + +## Check your deployment from the CLI + +From the command line, confirm that the deployment is running by using +`curl` with the URL that's shown on the details pane of the **ingress-nginx** +service. + +```bash +curl -I http://:/ +``` + +This command returns the following result. + +``` +HTTP/1.1 404 Not Found +Server: nginx/1.13.8 +``` + +Test the server's health ping service by appending `/healthz` to the URL. + +```bash +curl -I http://:/healthz +``` + +This command returns the following result. + +``` +HTTP/1.1 200 OK +Server: nginx/1.13.8 +``` diff --git a/datacenter/ucp/3.0/guides/user/swarm/deploy-multi-service-app.md b/datacenter/ucp/3.0/guides/user/swarm/deploy-multi-service-app.md new file mode 100644 index 0000000000..eb7462c80c --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/swarm/deploy-multi-service-app.md @@ -0,0 +1,160 @@ +--- +title: Deploy a multi-service app +description: Learn how to deploy containerized applications on a cluster, with Docker Universal Control Plane. +keywords: ucp, deploy, application, stack, service, compose +redirect_from: + - /ee/ucp/user/services/ + - /ee/ucp/swarm/deploy-from-cli/ + - /ee/ucp/swarm/deploy-from-ui/ +--- + +Docker Universal Control Plane allows you to use the tools you already know, +like `docker stack deploy` to deploy multi-service applications. You can +also deploy your applications from the UCP web UI. + +In this example we'll deploy a multi-service application that allows users to +vote on whether they prefer cats or dogs. 
+ +```yaml +version: "3" +services: + + # A Redis key-value store to serve as message queue + redis: + image: redis:alpine + ports: + - "6379" + networks: + - frontend + + # A PostgreSQL database for persistent storage + db: + image: postgres:9.4 + volumes: + - db-data:/var/lib/postgresql/data + networks: + - backend + + # Web UI for voting + vote: + image: dockersamples/examplevotingapp_vote:before + ports: + - 5000:80 + networks: + - frontend + depends_on: + - redis + + # Web UI to count voting results + result: + image: dockersamples/examplevotingapp_result:before + ports: + - 5001:80 + networks: + - backend + depends_on: + - db + + # Worker service to read from message queue + worker: + image: dockersamples/examplevotingapp_worker + networks: + - frontend + - backend + +networks: + frontend: + backend: + +volumes: + db-data: +``` + +## From the web UI + +To deploy your applications from the **UCP web UI**, on the left navigation bar +expand **Shared resources**, choose **Stacks**, and click **Create stack**. + +![Stack list](../../images/deploy-multi-service-app-1.png){: .with-border} + +Choose the name you want for your stack, and choose **Swarm services** as the +deployment mode. + +When you choose this option, UCP deploys your app using the +Docker swarm built-in orchestrator. If you choose 'Basic containers' as the +deployment mode, UCP deploys your app using the classic Swarm orchestrator. + +Then copy-paste the application definition in docker-compose.yml format. + +![Deploy stack](../../images/deploy-multi-service-app-2.png){: .with-border} + +Once you're done click **Create** to deploy the stack. + +## From the CLI + +To deploy the application from the CLI, start by configuring your Docker +CLI using a [UCP client bundle](../user-access/cli.md). + +Then, create a file named `docker-stack.yml` with the content of the yaml above, +and run: + + + +
    +
+
``` +docker stack deploy --compose-file docker-stack.yml voting_app +``` +
    +
+
``` +docker-compose --file docker-stack.yml --project-name voting_app up -d +``` +
    +
    + + +## Check your app + +Once the multi-service application is deployed, it shows up in the UCP web UI. +The 'Stacks' page shows that you've deployed the voting app. + +![Stack deployed](../../images/deploy-multi-service-app-3.png){: .with-border} + +You can also inspect the individual services of the app you deployed. For that, +click the **voting_app** to open the details pane, open **Inspect resources** and +choose **Services**, since this app was deployed with the built-in Docker swarm +orchestrator. + +![Service list](../../images/deploy-multi-service-app-4.png){: .with-border} + +You can also use the Docker CLI to check the status of your app: + +``` +docker stack ps voting_app +``` + +Great! The app is deployed so we can cast votes by accessing the service that's +listening on port 5000. +You don't need to know the ports a service listens to. You can +**click the voting_app_vote** service and click on the **Published endpoints** +link. + +![Voting app](../../images/deploy-multi-service-app-5.png){: .with-border} + +## Limitations + +When deploying applications from the web UI, you can't reference any external +files, no matter if you're using the built-in swarm orchestrator or classic +Swarm. For that reason, the following keywords are not supported: + +* build +* dockerfile +* env_file + +Also, UCP doesn't store the stack definition you've used to deploy the stack. +You can use a version control system for this. + diff --git a/datacenter/ucp/3.0/guides/user/swarm/deploy-to-collection.md b/datacenter/ucp/3.0/guides/user/swarm/deploy-to-collection.md new file mode 100644 index 0000000000..746f5b44f3 --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/swarm/deploy-to-collection.md @@ -0,0 +1,103 @@ +--- +title: Deploy application resources to a collection +description: Learn how to manage user access to application resources by using collections. 
+keywords: UCP, authentication, user management, stack, collection, role, application, resources +redirect_from: + - /ee/ucp/user/services/deploy-stack-to-collection/ +--- + +Docker Universal Control Plane enforces role-based access control when you +deploy services. By default, you don't need to do anything, because UCP deploys +your services to a default collection, unless you specify another one. You can +customize the default collection in your UCP profile page. +[Learn more about access control and collections](../authorization/index.md). + +UCP defines a collection by its path. For example, a user's default collection +has the path `/Shared/Private/<username>`. To deploy a service to a collection +that you specify, assign the collection's path to the *access label* of the +service. The access label is named `com.docker.ucp.access.label`. + +When UCP deploys a service, it doesn't automatically create the collections +that correspond with your access labels. An administrator must create these +collections and [grant users access to them](../authorization/grant-permissions.md). +Deployment fails if UCP can't find a specified collection or if the user +doesn't have access to it. + +## Deploy a service to a collection by using the CLI + +Here's an example of a `docker service create` command that deploys a service +to a `/Shared/database` collection: + +```bash +docker service create \ + --name redis_2 \ + --label com.docker.ucp.access.label="/Shared/database" \ + redis:3.0.6 +``` + +## Deploy services to a collection by using a Compose file + +You can also specify a target collection for a service in a Compose file. +In the service definition, add a `labels:` dictionary, and assign the +collection's path to the `com.docker.ucp.access.label` key. + +If you don't specify access labels in the Compose file, resources are placed in +the user's default collection when the stack is deployed. 
+ +You can place a stack's resources into multiple collections, but most of the +time, you won't need to do this. + +Here's an example of a Compose file that specifies two services, WordPress and +MySQL, and gives them the access label `/Shared/wordpress`: + +```yaml +version: '3.1' + +services: + + wordpress: + image: wordpress + ports: + - 8080:80 + environment: + WORDPRESS_DB_PASSWORD: example + deploy: + labels: + com.docker.ucp.access.label: /Shared/wordpress + mysql: + image: mysql:5.7 + environment: + MYSQL_ROOT_PASSWORD: example + deploy: + labels: + com.docker.ucp.access.label: /Shared/wordpress +``` + +To deploy the application: + +1. In the UCP web UI, navigate to the **Stacks** page and click **Create Stack**. +2. Name the app "wordpress". +3. From the **Mode** dropdown, select **Swarm Services**. +4. Copy and paste the previous compose file into the **docker-compose.yml** editor. +5. Click **Create** to deploy the application, and click **Done** when the + deployment completes. + + ![](../../images/deploy-stack-to-collection-1.png){: .with-border} + +If the `/Shared/wordpress` collection doesn't exist, or if you don't have +a grant for accessing it, UCP reports an error. + +To confirm that the service deployed to the `/Shared/wordpress` collection: + +1. In the **Stacks** page, click **wordpress**. +2. In the details pane, click **Inspect Resource** and select **Services**. +3. On the **Services** page, click **wordpress_mysql**. In the details pane, + make sure that the **Collection** is `/Shared/wordpress`. 
+ +![](../../images/deploy-stack-to-collection-2.png){: .with-border} + +## Where to go next + +- [Deploy a Compose-based app to a Kubernetes cluster](../kubernetes/deploy-with-compose.md) +- [Set metadata on a service (-l, –label)](/engine/reference/commandline/service_create/#set-metadata-on-a-service--l-label.md) +- [Docker object labels](/engine/userguide/labels-custom-metadata/.md) diff --git a/datacenter/ucp/3.0/guides/user/swarm/index.md b/datacenter/ucp/3.0/guides/user/swarm/index.md new file mode 100644 index 0000000000..76ff69bcaa --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/swarm/index.md @@ -0,0 +1,67 @@ +--- +title: Deploy a single service +description: Learn how to deploy services to a cluster managed by Universal Control Plane. +keywords: ucp, deploy, service +redirect_from: + - /ee/ucp/user/services/deploy-a-service/ +--- + +You can deploy and monitor your services from the UCP web UI. In this example +we'll deploy an [NGINX](https://www.nginx.com/) web server and make it +accessible on port `8000`. + +In your browser, navigate to the UCP web UI and click **Services**. On the +**Create a Service** page, click **Create Service** to configure the +NGINX service. + +Fill in the following fields: + +| Field | Value | +|:-------------|:-------------| +| Service name | nginx | +| Image name | nginx:latest | + +![](../../images/deploy-a-service-1.png){: .with-border} + +In the left pane, click **Network**. In the **Ports** section, +click **Publish Port** and fill in the following fields: + +| Field | Value | +|:---------------|:--------| +| Target port | 80 | +| Protocol | tcp | +| Publish mode | Ingress | +| Published port | 8000 | + +![](../../images/deploy-a-service-2.png){: .with-border} + +Click **Confirm** to map the ports for the NGINX service. + +Once you've specified the service image and ports, click **Create** to +deploy the service into the UCP cluster. 
+ +![](../../images/deploy-a-service-3.png){: .with-border} + +Once the service is up and running, you'll be able to see the default NGINX +page, by going to `http://:8000`. In the **Services** list, click the +**nginx** service, and in the details pane, click the link under +**Published Endpoints**. + +![](../../images/deploy-a-service-4.png){: .with-border} + +Clicking the link opens a new tab that shows the default NGINX home page. + +![](../../images/deploy-a-service-5.png){: .with-border} + +## Use the CLI to deploy the service + +You can also deploy the same service from the CLI. Once you've set up your +[UCP client bundle](../user-access/cli.md), run: + +```bash +docker service create --name nginx \ + --publish mode=ingress,target=80,published=8000 \ + --label com.docker.ucp.access.owner= \ + nginx +``` + diff --git a/datacenter/ucp/3.0/guides/user/swarm/use-secrets.md b/datacenter/ucp/3.0/guides/user/swarm/use-secrets.md new file mode 100644 index 0000000000..1fb05bc865 --- /dev/null +++ b/datacenter/ucp/3.0/guides/user/swarm/use-secrets.md @@ -0,0 +1,193 @@ +--- +title: Manage secrets +description: Learn how to manage your passwords, certificates, and other secrets in a secure way with Docker EE +keywords: UCP, secret, password, certificate, private key +redirect_from: + - /ee/ucp/user/secrets/ +--- + +When deploying and orchestrating services, you often need to configure them +with sensitive information like passwords, TLS certificates, or private keys. + +Universal Control Plane allows you to store this sensitive information, also +known as *secrets*, in a secure way. It also gives you role-based access control +so that you can control which users can use a secret in their services +and which ones can manage the secret. + +UCP extends the functionality provided by Docker Engine, so you can continue +using the same workflows and tools you already use, like the Docker CLI client. +[Learn how to use secrets with Docker](/engine/swarm/secrets/). 
+ +In this example, we're going to deploy a WordPress application that's composed of +two services: + +* wordpress: The service that runs Apache, PHP, and WordPress +* wordpress-db: a MySQL database used for data persistence + +Instead of configuring our services to use a plain text password stored in an +environment variable, we're going to create a secret to store the password. +When we deploy those services, we'll attach the secret to them, which creates +a file with the password inside the container running the service. +Our services will be able to use that file, but no one else will be able +to see the plain text password. + +To make things simpler, we're not going to configure the database service to +persist data. When the service stops, the data is lost. + +## Create a secret + +In the UCP web UI, open the **Swarm** section and click **Secrets**. + +![](../../images/manage-secrets-1.png){: .with-border} + +Click **Create Secret** to create a new secret. Once you create the secret +you won't be able to edit it or see the secret data again. + +![](../../images/manage-secrets-2.png){: .with-border} + +Assign a unique name to the secret and set its value. You can optionally define +a permission label so that other users have permission to use this secret. Also +note that a service and secret must have the same permission label, or both +must have no permission label at all, in order to be used together. + +In this example, the secret is named `wordpress-password-v1`, to make it easier +to track which version of the password our services are using. + + +## Use secrets in your services + +Before creating the MySQL and WordPress services, we need to create the network +that they're going to use to communicate with one another. + +Navigate to the **Networks** page, and create the `wordpress-network` with the +default settings. + +![](../../images/manage-secrets-3.png){: .with-border} + +Now create the MySQL service: + +1. 
Navigate to the **Services** page and click **Create Service**. Name the + service "wordpress-db", and for the **Task Template**, use the "mysql:5.7" + image. +2. In the left pane, click **Network**. In the **Networks** section, click + **Attach Network**, and in the dropdown, select **wordpress-network**. +3. In the left pane, click **Environment**. The Environment page is where you + assign secrets, environment variables, and labels to the service. +4. In the **Secrets** section, click **Use Secret**, and in the **Secret Name** + dropdown, select **wordpress-password-v1**. Click **Confirm** to associate + the secret with the service. +5. In the **Environment Variable** section, click **Add Environment Variable** and enter + the string "MYSQL_ROOT_PASSWORD_FILE=/run/secrets/wordpress-password-v1" to + create an environment variable that holds the path to the password file in + the container. +6. If you specified a permission label on the secret, you must set the same + permission label on this service. If the secret doesn't have a permission + label, then this service also can't have a permission label. +7. Click **Create** to deploy the MySQL service. + +This creates a MySQL service that's attached to the `wordpress-network` network +and that uses the `wordpress-password-v1` secret. By default, this creates a file +with the same name at `/run/secrets/` inside the container running +the service. + +We also set the `MYSQL_ROOT_PASSWORD_FILE` environment variable to configure +MySQL to use the content of the `/run/secrets/wordpress-password-v1` file as +the root password. + +![](../../images/manage-secrets-4.png){: .with-border} + +Now that the MySQL service is running, we can deploy a WordPress service that +uses MySQL as a storage backend: + +1. Navigate to the **Services** page and click **Create Service**. Name the + service "wordpress", and for the **Task Template**, use the + "wordpress:latest" image. +2. In the left pane, click **Network**. 
In the **Networks** section, click + **Attach Network**, and in the dropdown, select **wordpress-network**. +3. In the left pane, click **Environment**. +4. In the **Secrets** section, click **Use Secret**, and in the **Secret Name** + dropdown, select **wordpress-password-v1**. Click **Confirm** to associate + the secret with the service. +5. In the **Environment Variable**, click **Add Environment Variable** and enter + the string "WORDPRESS_DB_PASSWORD_FILE=/run/secrets/wordpress-password-v1" to + create an environment variable that holds the path to the password file in + the container. +6. Add another environment variable and enter the string + "WORDPRESS_DB_HOST=wordpress-db:3306". +7. If you specified a permission label on the secret, you must set the same + permission label on this service. If the secret doesn't have a permission + label, then this service also can't have a permission label. +8. Click **Create** to deploy the WordPress service. + +![](../../images/manage-secrets-4a.png){: .with-border} + +This creates the WordPress service attached to the same network as the MySQL +service so that they can communicate, and maps the port 80 of the service to +port 8000 of the cluster routing mesh. + +![](../../images/manage-secrets-5.png){: .with-border} + +Once you deploy this service, you'll be able to access it using the +IP address of any node in your UCP cluster, on port 8000. + +![](../../images/manage-secrets-6.png){: .with-border} + +## Update a secret + +If the secret gets compromised, you'll need to rotate it so that your services +start using a new secret. In this case, we need to change the password we're +using and update the MySQL and WordPress services to use the new password. + +Since secrets are immutable in the sense that you can't change the data +they store after they are created, we can use the following process to achieve +this: + +1. Create a new secret with a different password. +2. 
Update all the services that are using the old secret to use the new one + instead. +3. Delete the old secret. + +Let's rotate the secret we've created. Navigate to the **Secrets** page +and create a new secret named `wordpress-password-v2`. + +![](../../images/manage-secrets-7.png){: .with-border} + +This example is simple, and we know which services we need to update, +but in the real world, this might not always be the case. + +Click the **wordpress-password-v1** secret. In the details pane, +click **Inspect Resource**, and in the dropdown, select **Services**. + +![](../../images/manage-secrets-8.png){: .with-border} + +Start by updating the `wordpress-db` service to stop using the secret +`wordpress-password-v1` and use the new version instead. + +The `MYSQL_ROOT_PASSWORD_FILE` environment variable is currently set to look for +a file at `/run/secrets/wordpress-password-v1` which won't exist after we +update the service. So we have two options: + +1. Update the environment variable to have the value +`/run/secrets/wordpress-password-v2`, or +2. Instead of mounting the secret file in `/run/secrets/wordpress-password-v2` +(the default), we can customize it to be mounted in`/run/secrets/wordpress-password-v1` +instead. This way we don't need to change the environment variable. This is +what we're going to do. + +When adding the secret to the services, instead of leaving the **Target Name** +field with the default value, set it with `wordpress-password-v1`. This will make +the file with the content of `wordpress-password-v2` be mounted in +`/run/secrets/wordpress-password-v1`. + +Delete the `wordpress-password-v1` secret, and click **Update**. + +![](../../images/manage-secrets-9.png){: .with-border} + +Then do the same thing for the WordPress service. After this is done, the +WordPress application is running and using the new password. 
+ +## Managing secrets through the CLI + +You can find additional documentation on managing secrets through the CLI at [How Docker manages secrets](/engine/swarm/secrets/#read-more-about-docker-secret-commands). + + diff --git a/develop/develop-images/build_enhancements.md b/develop/develop-images/build_enhancements.md index cfcd3a8b9b..2128707160 100644 --- a/develop/develop-images/build_enhancements.md +++ b/develop/develop-images/build_enhancements.md @@ -23,7 +23,7 @@ For more information on build options, see the reference guide on the [command l ## Limitations * BuildKit mode is incompatible with UCP and Swarm Classic -* Only supported on Linux +* Only supported for building Linux containers ## To enable buildkit builds diff --git a/develop/develop-images/dockerfile_best-practices.md b/develop/develop-images/dockerfile_best-practices.md index f0142e2f0b..6beacdc26b 100644 --- a/develop/develop-images/dockerfile_best-practices.md +++ b/develop/develop-images/dockerfile_best-practices.md @@ -22,8 +22,8 @@ A Docker image consists of read-only layers each of which represents a Dockerfile instruction. The layers are stacked and each one is a delta of the changes from the previous layer. Consider this `Dockerfile`: -```conf -FROM ubuntu:15.04 +```Dockerfile +FROM ubuntu:18.04 COPY . /app RUN make /app CMD python /app/app.py @@ -31,7 +31,7 @@ CMD python /app/app.py Each instruction creates one layer: -- `FROM` creates a layer from the `ubuntu:15.04` Docker image. +- `FROM` creates a layer from the `ubuntu:18.04` Docker image. - `COPY` adds files from your Docker client's current directory. - `RUN` builds your application with `make`. - `CMD` specifies what command to run within the container. @@ -101,38 +101,147 @@ Sending build context to Docker daemon 187.8MB ### Pipe Dockerfile through `stdin` -Docker 17.05 added the ability to build images by piping `Dockerfile` through -`stdin` with a _local or remote build-context_. 
In earlier versions, building an -image with a `Dockerfile` from `stdin` did not send the build-context. +Docker has the ability to build images by piping `Dockerfile` through `stdin` +with a _local or remote build context_. Piping a `Dockerfile` through `stdin` +can be useful to perform one-off builds without writing a Dockerfile to disk, +or in situations where the `Dockerfile` is generated, and should not persist +afterwards. -**Docker 17.04 and lower** +> The examples in this section use [here documents](http://tldp.org/LDP/abs/html/here-docs.html) +> for convenience, but any method to provide the `Dockerfile` on `stdin` can be +> used. +> +> For example, the following commands are equivalent: +> +> ```bash +> echo -e 'FROM busybox\nRUN echo "hello world"' | docker build - +> ``` +> +> ```bash +> docker build -< FROM busybox +> RUN echo "hello world" +> EOF +> ``` +> +> You can substitute the examples with your preferred approach, or the approach +> that best fits your use-case. + +#### Build an image using a Dockerfile from stdin, without sending build context + +Use this syntax to build an image using a `Dockerfile` from `stdin`, without +sending additional files as build context. The hyphen (`-`) takes the position +of the `PATH`, and instructs Docker to read the build context (which only +contains a `Dockerfile`) from `stdin` instead of a directory: + +```bash +docker build [OPTIONS] - ``` -docker build -t foo -< **Note**: Attempting to build a Dockerfile that uses `COPY` or `ADD` will fail +> if this syntax is used. The following example illustrates this: +> +> ```bash +> # create a directory to work in +> mkdir example +> cd example +> +> # create an example file +> touch somefile.txt +> +> docker build -t myimage:latest -< FROM busybox +> COPY somefile.txt . +> RUN cat /somefile.txt +> EOF +> +> # observe that the build fails +> ... +> Step 2/3 : COPY somefile.txt . 
+> COPY failed: stat /var/lib/docker/tmp/docker-builder249218248/somefile.txt: no such file or directory +> ``` + +#### Build from a local build context, using a Dockerfile from stdin + +Use this syntax to build an image using files on your local filesystem, but using +a `Dockerfile` from `stdin`. The syntax uses the `-f` (or `--file`) option to +specify the `Dockerfile` to use, using a hyphen (`-`) as filename to instruct +Docker to read the `Dockerfile` from `stdin`: + +```bash +docker build [OPTIONS] -f- PATH ``` -docker build -t foo . -f-< **Under the hood** +> +> When building an image using a remote Git repository as build context, Docker +> performs a `git clone` of the repository on the local machine, and sends +> those files as build context to the daemon. This feature requires `git` to be +> installed on the host where you run the `docker build` command. + ### Exclude with .dockerignore To exclude files not relevant to the build (without restructuring your source @@ -142,9 +251,9 @@ similar to `.gitignore` files. For information on creating one, see the ### Use multi-stage builds -[Multi-stage builds](multistage-build.md) (in [Docker 17.05](/release-notes/docker-ce/#17050-ce-2017-05-04) or higher) -allow you to drastically reduce the size of your final image, without struggling -to reduce the number of intermediate layers and files. +[Multi-stage builds](multistage-build.md) allow you to drastically reduce the +size of your final image, without struggling to reduce the number of intermediate +layers and files. Because an image is built during the final stage of the build process, you can minimize image layers by [leveraging build cache](#leverage-build-cache). 
@@ -161,8 +270,8 @@ frequently changed: A Dockerfile for a Go application could look like: -``` -FROM golang:1.9.2-alpine3.6 AS build +```Dockerfile +FROM golang:1.11-alpine AS build # Install tools required for project # Run `docker build --no-cache .` to update dependencies @@ -220,14 +329,13 @@ In older versions of Docker, it was important that you minimized the number of layers in your images to ensure they were performant. The following features were added to reduce this limitation: -- In Docker 1.10 and higher, only the instructions `RUN`, `COPY`, `ADD` create - layers. Other instructions create temporary intermediate images, and do not - directly increase the size of the build. +- Only the instructions `RUN`, `COPY`, `ADD` create layers. Other instructions + create temporary intermediate images, and do not increase the size of the build. -- In Docker 17.05 and higher, you can do [multi-stage builds](multistage-build.md) - and only copy the artifacts you need into the final image. This allows you to - include tools and debug information in your intermediate build stages without - increasing the size of the final image. +- Where possible, use [multi-stage builds](multistage-build.md), and only copy + the artifacts you need into the final image. This allows you to include tools + and debug information in your intermediate build stages without increasing the + size of the final image. ### Sort multi-line arguments @@ -238,12 +346,14 @@ review. Adding a space before a backslash (`\`) helps as well. Here’s an example from the [`buildpack-deps` image](https://github.com/docker-library/buildpack-deps): - RUN apt-get update && apt-get install -y \ - bzr \ - cvs \ - git \ - mercurial \ - subversion +```Dockerfile +RUN apt-get update && apt-get install -y \ + bzr \ + cvs \ + git \ + mercurial \ + subversion +``` ### Leverage build cache @@ -308,7 +418,7 @@ The following examples show the different acceptable formats. 
Explanatory commen > Strings with spaces must be quoted **or** the spaces must be escaped. Inner > quote characters (`"`), must also be escaped. -```conf +```Dockerfile # Set one or more individual labels LABEL com.example.version="0.0.1-beta" LABEL vendor1="ACME Incorporated" @@ -322,14 +432,14 @@ to combine all labels into a single `LABEL` instruction, to prevent extra layers from being created. This is no longer necessary, but combining labels is still supported. -```conf +```Dockerfile # Set multiple labels on one line LABEL com.example.version="0.0.1-beta" com.example.release-date="2015-02-12" ``` The above can also be written as: -```conf +```Dockerfile # Set multiple labels at once, using line-continuation characters to break long lines LABEL vendor=ACME\ Incorporated \ com.example.is-beta= \ @@ -368,26 +478,31 @@ know there is a particular package, `foo`, that needs to be updated, use Always combine `RUN apt-get update` with `apt-get install` in the same `RUN` statement. For example: - RUN apt-get update && apt-get install -y \ - package-bar \ - package-baz \ - package-foo - +```Dockerfile +RUN apt-get update && apt-get install -y \ + package-bar \ + package-baz \ + package-foo +``` Using `apt-get update` alone in a `RUN` statement causes caching issues and subsequent `apt-get install` instructions fail. For example, say you have a Dockerfile: - FROM ubuntu:14.04 - RUN apt-get update - RUN apt-get install -y curl +```Dockerfile +FROM ubuntu:18.04 +RUN apt-get update +RUN apt-get install -y curl +``` After building the image, all layers are in the Docker cache. Suppose you later modify `apt-get install` by adding extra package: - FROM ubuntu:14.04 - RUN apt-get update - RUN apt-get install -y curl nginx +```Dockerfile +FROM ubuntu:18.04 +RUN apt-get update +RUN apt-get install -y curl nginx +``` Docker sees the initial and modified instructions as identical and reuses the cache from previous steps. 
As a result the `apt-get update` is _not_ executed @@ -401,10 +516,12 @@ intervention. This technique is known as "cache busting". You can also achieve cache-busting by specifying a package version. This is known as version pinning, for example: - RUN apt-get update && apt-get install -y \ - package-bar \ - package-baz \ - package-foo=1.3.* +```Dockerfile +RUN apt-get update && apt-get install -y \ + package-bar \ + package-baz \ + package-foo=1.3.* +``` Version pinning forces the build to retrieve a particular version regardless of what’s in the cache. This technique can also reduce failures due to unanticipated changes @@ -413,20 +530,22 @@ in required packages. Below is a well-formed `RUN` instruction that demonstrates all the `apt-get` recommendations. - RUN apt-get update && apt-get install -y \ - aufs-tools \ - automake \ - build-essential \ - curl \ - dpkg-sig \ - libcap-dev \ - libsqlite3-dev \ - mercurial \ - reprepro \ - ruby1.9.1 \ - ruby1.9.1-dev \ - s3cmd=1.1.* \ - && rm -rf /var/lib/apt/lists/* +```Dockerfile +RUN apt-get update && apt-get install -y \ + aufs-tools \ + automake \ + build-essential \ + curl \ + dpkg-sig \ + libcap-dev \ + libsqlite3-dev \ + mercurial \ + reprepro \ + ruby1.9.1 \ + ruby1.9.1-dev \ + s3cmd=1.1.* \ + && rm -rf /var/lib/apt/lists/* +``` The `s3cmd` argument specifies a version `1.1.*`. 
If the image previously used an older version, specifying the new one causes a cache bust of `apt-get @@ -522,10 +641,12 @@ variables specific to services you wish to containerize, such as Postgres’s Lastly, `ENV` can also be used to set commonly used version numbers so that version bumps are easier to maintain, as seen in the following example: - ENV PG_MAJOR 9.3 - ENV PG_VERSION 9.3.4 - RUN curl -SL http://example.com/postgres-$PG_VERSION.tar.xz | tar -xJC /usr/src/postgress && … - ENV PATH /usr/local/postgres-$PG_MAJOR/bin:$PATH +```Dockerfile +ENV PG_MAJOR 9.3 +ENV PG_VERSION 9.3.4 +RUN curl -SL http://example.com/postgres-$PG_VERSION.tar.xz | tar -xJC /usr/src/postgress && … +ENV PATH /usr/local/postgres-$PG_MAJOR/bin:$PATH +``` Similar to having constant variables in a program (as opposed to hard-coding values), this approach lets you change a single `ENV` instruction to @@ -541,11 +662,10 @@ FROM alpine ENV ADMIN_USER="mark" RUN echo $ADMIN_USER > ./mark RUN unset ADMIN_USER -CMD sh ``` ```bash -$ docker run --rm -it test sh echo $ADMIN_USER +$ docker run --rm test sh -c 'echo $ADMIN_USER' mark ``` @@ -567,7 +687,7 @@ CMD sh ``` ```bash -$ docker run --rm -it test sh echo $ADMIN_USER +$ docker run --rm test sh -c 'echo $ADMIN_USER' ``` @@ -591,9 +711,11 @@ the specifically required files change. For example: - COPY requirements.txt /tmp/ - RUN pip install --requirement /tmp/requirements.txt - COPY . /tmp/ +```Dockerfile +COPY requirements.txt /tmp/ +RUN pip install --requirement /tmp/requirements.txt +COPY . /tmp/ +``` Results in fewer cache invalidations for the `RUN` step, than if you put the `COPY . /tmp/` before it. @@ -604,16 +726,20 @@ delete the files you no longer need after they've been extracted and you don't have to add another layer in your image. 
For example, you should avoid doing things like: - ADD http://example.com/big.tar.xz /usr/src/things/ - RUN tar -xJf /usr/src/things/big.tar.xz -C /usr/src/things - RUN make -C /usr/src/things all +```Dockerfile +ADD http://example.com/big.tar.xz /usr/src/things/ +RUN tar -xJf /usr/src/things/big.tar.xz -C /usr/src/things +RUN make -C /usr/src/things all +``` And instead, do something like: - RUN mkdir -p /usr/src/things \ - && curl -SL http://example.com/big.tar.xz \ - | tar -xJC /usr/src/things \ - && make -C /usr/src/things all +```Dockerfile +RUN mkdir -p /usr/src/things \ + && curl -SL http://example.com/big.tar.xz \ + | tar -xJC /usr/src/things \ + && make -C /usr/src/things all +``` For other items (files, directories) that do not require `ADD`’s tar auto-extraction capability, you should always use `COPY`. @@ -628,16 +754,22 @@ default flags). Let's start with an example of an image for the command line tool `s3cmd`: - ENTRYPOINT ["s3cmd"] - CMD ["--help"] +```Dockerfile +ENTRYPOINT ["s3cmd"] +CMD ["--help"] +``` Now the image can be run like this to show the command's help: - $ docker run s3cmd +```bash +$ docker run s3cmd +``` Or using the right parameters to execute a command: - $ docker run s3cmd ls s3://mybucket +```bash +$ docker run s3cmd ls s3://mybucket +``` This is useful because the image name can double as a reference to the binary as shown in the command above. @@ -676,23 +808,31 @@ exec "$@" The helper script is copied into the container and run via `ENTRYPOINT` on container start: - COPY ./docker-entrypoint.sh / - ENTRYPOINT ["/docker-entrypoint.sh"] - CMD ["postgres"] +```Dockerfile +COPY ./docker-entrypoint.sh / +ENTRYPOINT ["/docker-entrypoint.sh"] +CMD ["postgres"] +``` This script allows the user to interact with Postgres in several ways. 
It can simply start Postgres: - $ docker run postgres +```bash +$ docker run postgres +``` Or, it can be used to run Postgres and pass parameters to the server: - $ docker run postgres postgres --help +```bash +$ docker run postgres postgres --help +``` Lastly, it could also be used to start a totally different tool, such as Bash: - $ docker run --rm -it postgres bash +```bash +$ docker run --rm -it postgres bash +``` ### VOLUME diff --git a/develop/develop-images/multistage-build.md b/develop/develop-images/multistage-build.md index 2f3ae15005..022a1741db 100644 --- a/develop/develop-images/multistage-build.md +++ b/develop/develop-images/multistage-build.md @@ -131,13 +131,13 @@ intermediate artifacts are left behind, and not saved in the final image. By default, the stages are not named, and you refer to them by their integer number, starting with 0 for the first `FROM` instruction. However, you can -name your stages, by adding an `as ` to the `FROM` instruction. This +name your stages, by adding an `AS ` to the `FROM` instruction. This example improves the previous one by naming the stages and using the name in the `COPY` instruction. This means that even if the instructions in your Dockerfile are re-ordered later, the `COPY` doesn't break. ```conf -FROM golang:1.7.3 as builder +FROM golang:1.7.3 AS builder WORKDIR /go/src/github.com/alexellis/href-counter/ RUN go get -d -v golang.org/x/net/html COPY app.go . diff --git a/docker-compose_stage.yml b/docker-compose_stage.yml deleted file mode 100644 index 15e6453381..0000000000 --- a/docker-compose_stage.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: "2" -services: - docs-beta-stage: - build: - context: . 
- dockerfile: Dockerfile - image: docs/ - ports: - - "4000:4000" - volumes: - - "./:/usr/src/app" diff --git a/docker-for-aws/deploy.md b/docker-for-aws/deploy.md index dd00de0eca..41ddc52630 100644 --- a/docker-for-aws/deploy.md +++ b/docker-for-aws/deploy.md @@ -172,15 +172,15 @@ the docker command is automatically executed. ### Docker Stack deployment -To deploy complex multi-container apps, you can use the `docker stack deploy` command. You can either deploy a bundle on your machine over an SSH tunnel, or copy the `docker-compose.yml` file (for example using `scp`) to a manager node, SSH into the manager and then run `docker stack deploy` (if you have multiple managers, ensure that your session is on one that has the stack file). +To deploy complex multi-container apps, you can use the `docker stack deploy` command. You can either deploy a bundle on your machine over an SSH tunnel, or copy the `docker-compose.yml` file to a manager node via `scp` for example. You can then SSH into the manager node and run `docker stack deploy` with the `--compose-file` or `-c` option. See [docker stack deploy options](/engine/reference/commandline/stack_deploy/#options) for the list of different options. If you have multiple manager nodes, make sure you are logged in to the one with the stack file copy. For example: ```bash -docker stack deploy -f docker-compose.yml myapp +docker stack deploy --compose-file docker-compose.yml myapp ``` -A good sample app to test deployment of stacks is the [Docker voting app](https://github.com/docker/example-voting-app). +See [Docker voting app](https://github.com/docker/example-voting-app) for a good sample app to test stack deployments. By default, apps deployed with stacks do not have ports publicly exposed. 
Update port mappings for services, and Docker automatically wires up the underlying platform load balancers: diff --git a/docker-for-aws/release-notes.md b/docker-for-aws/release-notes.md index 2ebd3dfb14..a3249e7f4b 100644 --- a/docker-for-aws/release-notes.md +++ b/docker-for-aws/release-notes.md @@ -6,33 +6,19 @@ title: Docker for AWS release notes {% include d4a_buttons.md %} -## Enterprise Edition -[Docker Enterprise Edition Lifecycle](https://success.docker.com/Policies/Maintenance_Lifecycle){: target="_blank" class="_"} - -[Deploy Docker Enterprise Edition (EE) for AWS](https://hub.docker.com/editions/enterprise/docker-ee-aws?tab=description){: target="_blank" class="button outline-btn blank_"} - -### 17.06 EE - -- Docker engine 17.06 EE -- For Std/Adv external logging has been removed, as it is now handled by [UCP](https://docs.docker.com/datacenter/ucp/2.0/guides/configuration/configure-logs/){: target="_blank" class="_"} -- UCP 2.2.3 -- DTR 2.3.3 - -### 17.03 EE - -- Docker engine 17.03 EE -- UCP 2.1.5 -- DTR 2.2.7 - - > **Note** Starting with 18.02.0-CE EFS encryption option has been removed to prevent the [recreation of the EFS volume](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html){: target="_blank" class="_"}. ## Stable channel -### 18.06.1 CE - {{aws_blue_latest}} +### 18.09.2 +Release date: 2/24/2019 + +- Docker Engine upgraded to [Docker 18.09.2](https://github.com/docker/docker-ce/releases/tag/v18.09.2){: target="_blank" class="_"} + +### 18.06.1 CE + Release date: 8/24/2018 - Docker Engine upgraded to [Docker 18.06.1 CE](https://github.com/docker/docker-ce/releases/tag/v18.06.1-ce){: target="_blank" class="_"} @@ -139,3 +125,21 @@ Release date: 10/18/2017 ## Template archive If you are looking for templates from older releases, check out the [template archive](/docker-for-aws/archive.md). 
+ +## Enterprise Edition +[Docker Enterprise Edition Lifecycle](https://success.docker.com/Policies/Maintenance_Lifecycle){: target="_blank" class="_"} + +[Deploy Docker Enterprise Edition (EE) for AWS](https://hub.docker.com/editions/enterprise/docker-ee-aws?tab=description){: target="_blank" class="button outline-btn blank_"} + +### 17.06 EE + +- Docker engine 17.06 EE +- For Std/Adv external logging has been removed, as it is now handled by [UCP](https://docs.docker.com/datacenter/ucp/2.0/guides/configuration/configure-logs/){: target="_blank" class="_"} +- UCP 2.2.3 +- DTR 2.3.3 + +### 17.03 EE + +- Docker engine 17.03 EE +- UCP 2.1.5 +- DTR 2.2.7 diff --git a/docker-for-azure/deploy.md b/docker-for-azure/deploy.md index a70e6454e9..7e17e16b37 100644 --- a/docker-for-azure/deploy.md +++ b/docker-for-azure/deploy.md @@ -149,15 +149,15 @@ This tool internally makes use of docker global-mode service that runs a task on ### Docker Stack deployment -To deploy complex multi-container apps, you can use the `docker stack deploy` command. You can either deploy a bundle on your machine over an SSH tunnel, or copy the `docker-compose.yml` file (for example using `scp`) to a manager node, SSH into the manager and then run `docker stack deploy` (if you have multiple managers, ensure that your session is on one that has the stack file). +To deploy complex multi-container apps, you can use the docker stack deploy command. You can either deploy a bundle on your machine over an SSH tunnel, or copy the docker-compose.yml file to a manager node via scp for example. You can then SSH into the manager node and run docker stack deploy with the --compose-file or -c option. See docker stack deploy options for the list of different options. If you have multiple manager nodes, make sure you are logged in to the one with the stack file copy. 
For example: ```bash -docker stack deploy -f docker-compose.yml myapp +docker stack deploy --compose-file docker-compose.yml myapp ``` -A good sample app to test deployment of stacks is the [Docker voting app](https://github.com/docker/example-voting-app). +See [Docker voting app](https://github.com/docker/example-voting-app) for a good sample app to test stack deployments. By default, apps deployed with stacks do not have ports publicly exposed. Update port mappings for services, and Docker automatically wires up the underlying platform load balancers: diff --git a/docker-for-azure/release-notes.md b/docker-for-azure/release-notes.md index a6dbe43590..a151dcea35 100644 --- a/docker-for-azure/release-notes.md +++ b/docker-for-azure/release-notes.md @@ -9,6 +9,11 @@ title: Docker for Azure Release Notes ## Enterprise Edition [Docker Enterprise Edition Lifecycle](https://success.docker.com/Policies/Maintenance_Lifecycle){: target="_blank"} +### 17.06.2-ee-19 EE +- Docker engine 17.06.2-ee-19 EE +- UCP 2.2.16 +- DTR 2.3.10 + ### 17.06 EE - Docker engine 17.06 EE @@ -24,10 +29,15 @@ title: Docker for Azure Release Notes ## Stable channel -### 18.06.1 CE - {{azure_blue_latest}} +### 18.09.2 +Release date: 2/24/2019 + + - Docker Engine upgraded to [Docker 18.09.2](https://github.com/docker/docker-ce/releases/tag/v18.09.2){: target="_blank" class="_"} + +### 18.06.1 CE + Release date: 8/24/2018 - Docker Engine upgraded to [Docker 18.06.1 CE](https://github.com/docker/docker-ce/releases/tag/v18.06.1-ce){: target="_blank" class="_"} diff --git a/docker-for-mac/edge-release-notes.md b/docker-for-mac/edge-release-notes.md index 0d5f65247f..39227d93bf 100644 --- a/docker-for-mac/edge-release-notes.md +++ b/docker-for-mac/edge-release-notes.md @@ -18,6 +18,56 @@ for Mac](install.md#download-docker-for-mac). 
## Edge Releases of 2019 +### Docker Community Edition 2.0.4.1 2019-05-07 + +[Download](https://download.docker.com/mac/edge/34207/Docker.dmg) + +* Bug fixes and minor changes + - Upgrade QEMU from 2.8.0 to 3.1.0 to fix an emulation issue when building and running Java applications on Arm64 devices. [docker/for-mac#3646](https://github.com/docker/for-mac/issues/3646) + +### Docker Community Edition 2.0.4.0 2019-04-30 + +[Download](https://download.docker.com/mac/edge/33772/Docker.dmg) + +* Upgrades + - [Docker 19.03.0-beta3](https://github.com/docker/docker-ce/releases/tag/v19.03.0-beta3) + - [Docker Compose 1.24.0](https://github.com/docker/compose/releases/tag/1.24.0) + - [Compose on Kubernetes 0.4.22](https://github.com/docker/compose-on-kubernetes/releases/tag/v0.4.22) + - [Kubernetes 1.14.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.14.md#changelog-since-v1141) + +* New + - App: Docker CLI plugin to configure, share, and install applications + + - Extend Compose files with metadata and parameters + - Reuse the same application across multiple environments (Development/QA/Staging/Production) + - Multi-orchestrator installation (Swarm or Kubernetes) + - Push/Pull/Promotion/Signing supported for application, with the same workflow as images + - Fully CNAB compliant + - Full support for Docker Contexts + + - Buildx (Tech Preview): Docker CLI plugin for extended build capabilities with BuildKit + + - Familiar UI from docker build + - Full BuildKit capabilities with container driver + - Multiple builder instance support + - Multi-node builds for cross-platform images (out-of-the-box support for linux/arm/v7 and linux/arm64) + - Parallel building of Compose files + - High-level build constructs with `bake` + +* Bug fixes and minor changes + - Truncate UDP DNS responses which are over 512 bytes in size + +### Docker Community Edition 2.0.3.0 2019-03-05 + +[Download](https://download.docker.com/mac/edge/31778/Docker.dmg) + +* Upgrades + - [Docker 
18.09.3](https://github.com/docker/docker-ce/releases/tag/v18.09.3) + +* Bug fixes and minor changes + - Fixed port 8080 that was used on localhost when starting Kubernetes. Fixes [docker/for-mac#3522](https://github.com/docker/for-mac/issues/3522) + - Error message improvements, do not propose to run diagnostics / reset to factory default when not appropriate. + ### Docker Community Edition 2.0.2.1 2019-02-15 [Download](https://download.docker.com/mac/edge/31274/Docker.dmg) diff --git a/docker-for-mac/osxfs.md b/docker-for-mac/osxfs.md index e1e17bdecd..46bb98d29d 100644 --- a/docker-for-mac/osxfs.md +++ b/docker-for-mac/osxfs.md @@ -237,7 +237,7 @@ coherence: different files that don't exist on the shared volume. Even with a 2× speedup via latency reduction this use case still seems "slow". With caching enabled the performance increases around 3.5×, as described in -the [user-guided caching post](link-TODO). +the [user-guided caching post](https://blog.docker.com/2017/05/user-guided-caching-in-docker-for-mac/). We expect to see further performance improvements for rake with a "negative dcache" that keeps track of, in the Linux kernel itself, the files that do not exist. However, even this is not sufficient for the first time rake is run on a diff --git a/docker-for-windows/edge-release-notes.md b/docker-for-windows/edge-release-notes.md index 28c3ca60fa..07e42d1bec 100644 --- a/docker-for-windows/edge-release-notes.md +++ b/docker-for-windows/edge-release-notes.md @@ -18,6 +18,60 @@ for Windows](install.md#download-docker-for-windows). ## Edge Releases of 2019 +### Docker Community Edition 2.0.4.1 2019-05-07 + +[Download](https://download.docker.com/win/edge/34207/Docker%20Desktop%20Installer.exe) + +* Bug fixes and minor changes + - Upgrade QEMU from 2.8.0 to 3.1.0 to fix an emulation issue when building and running Java applications on Arm64 devices. 
[docker/for-mac#3646](https://github.com/docker/for-mac/issues/3646) + +### Docker Community Edition 2.0.4.0 2019-04-30 + +[Download](https://download.docker.com/win/edge/33772/Docker%20Desktop%20Installer.exe) + +* Upgrades + - [Docker 19.03.0-beta3](https://github.com/docker/docker-ce/releases/tag/v19.03.0-beta3) + - [Docker Compose 1.24.0](https://github.com/docker/compose/releases/tag/1.24.0) + - [Compose on Kubernetes 0.4.22](https://github.com/docker/compose-on-kubernetes/releases/tag/v0.4.22) + - [Kubernetes 1.14.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.14.md#changelog-since-v1141) + +* New + + - App: Docker CLI plugin to configure, share, and install applications + + - Extend Compose files with metadata and parameters + - Reuse the same application across multiple environments (Development/QA/Staging/Production) + - Multi-orchestrator installation (Swarm or Kubernetes) + - Push/Pull/Promotion/Signing supported for application, with the same workflow as images + - Fully CNAB compliant + - Full support for Docker Contexts + + - Buildx (Tech Preview): Docker CLI plugin for extended build capabilities with BuildKit + + - Familiar UI from docker build + - Full BuildKit capabilities with container driver + - Multiple builder instance support + - Multi-node builds for cross-platform images (out-of-the-box support for linux/arm/v7 and linux/arm64) + - Parallel building of compose files + - High-level build constructs with `bake` + +* Bug fixes and minor changes + + - Fix `Delete` of persistent volume claims + - Truncate UDP DNS responses which are over 512 bytes in size + +### Docker Community Edition 2.0.3.0 2019-03-05 + +[Download](https://download.docker.com/win/edge/31778/Docker%20Desktop%20Installer.exe) + +* Upgrades + - [Docker 18.09.3](https://github.com/docker/docker-ce/releases/tag/v18.09.3) + +* Bug fixes and minor changes + - Fixed docker not added to PATH after install in some cases + - Fixed port 8080 that was used on 
localhost when starting Kubernetes.
+  - Fixed "create issue" link in diagnostics windows.
+
 ### Docker Community Edition 2.0.2.1 2019-02-15
 
 [Download](https://download.docker.com/win/edge/31274/Docker%20Desktop%20Installer.exe)
diff --git a/docker-for-windows/install.md b/docker-for-windows/install.md
index b1fd76f684..b794ba4a91 100644
--- a/docker-for-windows/install.md
+++ b/docker-for-windows/install.md
@@ -47,7 +47,9 @@ Hub](https://hub.docker.com/editions/community/docker-ce-desktop-windows){:
 * Nested virtualization scenarios, such as running Docker Desktop for Windows on
   a VMWare or Parallels instance might work, but there are no guarantees. For
   more information, see [Running Docker Desktop for Windows in nested virtualization
-  scenarios](troubleshoot.md#running-docker-for-windows-in-nested-virtualization-scenarios)
+  scenarios](troubleshoot.md#running-docker-desktop-for-windows-in-nested-virtualization-scenarios)
+
+**Note**: Refer to the [Docker compatibility matrix](https://success.docker.com/article/compatibility-matrix) for complete Docker compatibility information with Windows Server.
 
 ### About Windows containers
 
diff --git a/docker-for-windows/troubleshoot.md b/docker-for-windows/troubleshoot.md
index 7fb64faaf9..29a6b8b3a6 100644
--- a/docker-for-windows/troubleshoot.md
+++ b/docker-for-windows/troubleshoot.md
@@ -377,7 +377,7 @@ Here are some steps to take if you encounter similar problems:
 ### Windows containers and Windows Server 2016
 
 Docker Desktop is not supported on Windows Server 2016, instead you can use
-[Docker Enterprise Basic Edition](/ee/index) at no aditional cost.
+[Docker Enterprise Basic Edition](/ee/index.md) at no additional cost.
 
 If you have questions about how to run Windows containers on Windows 10, see
 [Switch between Windows and Linux
@@ -406,9 +406,9 @@ limitations with regard to networking due to the current
 implementation of Windows NAT (WinNAT).
These limitations may potentially resolve as the Windows containers project evolves. -One thing you may encounter rather immediately is that published ports on -Windows containers do not do loopback to the local host. Instead, container -endpoints are only reachable from the host using the container's IP and port. +Windows containers work with published ports on localhost beginning with Windows 10 1809 using Docker Desktop for Windows as well as Windows Server 2019 / 1809 using Docker EE. + +If you are working with a version prior to `Windows 10 18.09`, published ports on Windows containers have an issue with loopback to the localhost. You can only reach container endpoints from the host using the container's IP and port. With `Windows 10 18.09`, containers work with published ports on localhost. So, in a scenario where you use Docker to pull an image and run a webserver with a command like this: diff --git a/docker-hub/builds/advanced.md b/docker-hub/builds/advanced.md index 81e57207b9..bf89e5c4e6 100644 --- a/docker-hub/builds/advanced.md +++ b/docker-hub/builds/advanced.md @@ -22,8 +22,8 @@ processes and do not affect your service's run environment. * `COMMIT_MSG`: the message from the commit being tested and built. * `DOCKER_REPO`: the name of the Docker repository being built. * `DOCKERFILE_PATH`: the dockerfile currently being built. -* `CACHE_TAG`: the Docker repository tag being built. -* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO`:`CACHE_TAG`.) +* `DOCKER_TAG`: the Docker repository tag being built. +* `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO`:`DOCKER_TAG`.) 
If you are using these build environment variables in a `docker-compose.test.yml` file for automated testing, declare them in your `sut` diff --git a/docker-hub/builds/automated-testing.md b/docker-hub/builds/automated-testing.md index 8f1813ccc2..f0f24302b4 100644 --- a/docker-hub/builds/automated-testing.md +++ b/docker-hub/builds/automated-testing.md @@ -95,7 +95,7 @@ Docker repository, regardless of the Autotest settings. > **Note**: For security purposes, autotest on _external pull requests_ is limited on public repositories. Private images are not pulled and - environment variables defined in Docker Hub ware not + environment variables defined in Docker Hub are not available. Automated builds continue to work as usual. 9. Click **Save** to save the settings, or click **Save and build** to save and diff --git a/docker-hub/index.md b/docker-hub/index.md index ce1d4491da..e2ed1c6f4b 100644 --- a/docker-hub/index.md +++ b/docker-hub/index.md @@ -141,7 +141,7 @@ Congratulations! You've successfully: - Built a Docker container image on your computer - Pushed it to Docker Hub -### Next Steps +### Next steps - Create an [Organization](orgs.md) to use Docker Hub with your team. - Automatically build container images from code through [Builds](builds/index.md). diff --git a/docker-hub/upgrade.md b/docker-hub/upgrade.md index 54a133e39e..b842b3b93d 100644 --- a/docker-hub/upgrade.md +++ b/docker-hub/upgrade.md @@ -1,12 +1,12 @@ --- description: Upgrading your Docker Hub Plan keywords: Docker, docker, trusted, registry, accounts, plans, Dockerfile, Docker Hub, webhooks, docs, documentation -title: Upgrading your Plan +title: Upgrade your Plan --- User and organization accounts maintain separate Docker Hub billing profiles. -### Upgrading your personal plan +### Upgrade your personal plan Docker Hub includes one private Docker Hub repository for free. If you need more private repositories, you can upgrade from your free account to a paid @@ -17,7 +17,7 @@ To upgrade: 2. 
Click Change Plan 3. Select your plan and provide your payment information to upgrade ![Upgrade Plan](images/index-upgrade-plan.png) -### Upgrading your organization's plan +### Upgrade your organization's plan To upgrade an Organization's plan: diff --git a/ee/dtr/admin/configure/enable-single-sign-on.md b/ee/dtr/admin/configure/enable-single-sign-on.md index eb3932b0c2..595a01eb4a 100644 --- a/ee/dtr/admin/configure/enable-single-sign-on.md +++ b/ee/dtr/admin/configure/enable-single-sign-on.md @@ -10,6 +10,9 @@ separately on the web UI of both applications. You can configure DTR to have single sign-on (SSO) with UCP, so that users only have to authenticate once. +> **Note**: After configuring single sign-on with DTR, users accessing DTR via +> `docker login` should create an [access token](/ee/dtr/user/access-tokens/) and use it to authenticate. + ## At installation time When installing DTR, use the `docker/dtr install --dtr-external-url ` diff --git a/ee/dtr/admin/configure/external-storage/index.md b/ee/dtr/admin/configure/external-storage/index.md index bc015d131c..8d5b0086e5 100644 --- a/ee/dtr/admin/configure/external-storage/index.md +++ b/ee/dtr/admin/configure/external-storage/index.md @@ -1,55 +1,61 @@ --- title: Configure DTR image storage description: Storage configuration for Docker Trusted Registry -keywords: storage drivers, NFS, Azure, S3 +keywords: dtr, storage drivers, NFS, Azure, S3 --- +## Configure your storage backend + By default DTR uses the local filesystem of the node where it is running to store your Docker images. You can configure DTR to use an external storage backend, for improved performance or high availability. ![architecture diagram](../../../images/configure-external-storage-1.svg) -If your DTR deployment only has a single replica, you can continue using the -local filesystem to store your Docker images. 
If your DTR deployment has -multiple replicas, for high availability, you need to ensure all replicas are -using the same storage backend. When a user pulls an image, the node serving -the request needs to have access to that image. +If your DTR deployment has a single replica, you can continue using the +local filesystem for storing your Docker images. If your DTR deployment has +multiple replicas, make sure all replicas are +using the same storage backend for high availability. Whenever a user pulls an image, the DTR +node serving the request needs to have access to that image. -DTR supports these storage systems: +DTR supports the following storage systems: * Local filesystem -* NFS -* Amazon S3 or compatible -* Google Cloud Storage -* Microsoft Azure Blob storage -* OpenStack Swift + * [NFS](nfs.md) + * [Bind Mount](/storage/bind-mounts/) + * [Volume](/storage/volumes/) +* Cloud Storage Providers + * [Amazon S3](s3.md) + * [Microsoft Azure](/registry/storage-drivers/azure/) + * [OpenStack Swift](/registry/storage-drivers/swift/) + * [Google Cloud Storage](/registry/storage-drivers/gcs/) -To configure the storage backend, you can log into the **DTR web UI** -as an administrator user, navigate to the **Settings** page, and choose -**Storage**. +> **Note**: Some of the previous links are meant to be informative and are not representative of DTR's implementation of these storage systems. + +To configure the storage backend, log in to the DTR web interface +as an admin, and navigate to **System > Storage**. ![dtr settings](../../../images/configure-external-storage-2.png){: .with-border} -The storage configuration page in the DTR web UI has options for the most -common configuration options, but you can also upload a yaml configuration file. +The storage configuration page gives you the most +common configuration options, but you have the option to upload a configuration file in `.yml`, `.yaml`, or `.txt` format. 
-The format of this configuration file is similar to the one used by -[Docker Registry](/registry/configuration.md). +See [Docker Registry Configuration](/registry/configuration.md) for configuration options. ## Local filesystem By default, DTR creates a volume named `dtr-registry-` to store your images using the local filesystem. You can customize the name and path of -the volume used by DTR, using the `docker/dtr reconfigure --dtr-storage-volume` -option. +the volume by using `docker/dtr install --dtr-storage-volume` or `docker/dtr reconfigure --dtr-storage-volume`. + +> When running DTR 2.5 (with experimental online garbage collection) and 2.6.0 to 2.6.3, there is an issue with [reconfiguring DTR with `--nfs-storage-url`](/ee/dtr/release-notes#version-26) which leads to erased tags. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed. To work around the `--nfs-storage-url` flag issue, manually create a storage volume on each DTR node. If DTR is already installed in your cluster, [reconfigure DTR](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#reconfigureusingalocalnfsvolume) with the `--dtr-storage-volume` flag using your newly-created volume. +{: .warning} If you're deploying DTR with high-availability, you need to use NFS or any other centralized storage backend so that all your DTR replicas have access to the same images. -To check how much space your images are taking in the local filesystem, you -can ssh into the node where DTR is deployed and run: +To check how much space your images are utilizing in the local filesystem, SSH into the DTR node and run: ```bash {% raw %} @@ -62,15 +68,28 @@ $(dirname $(docker volume inspect --format '{{.Mountpoint}}' dtr-registry- ``` -Use the format `nfs:///` for the NFS storage URL. 
To support **NFS v4**, you can now specify additional options when running [docker/dtr install](../../../../../reference/dtr/2.6/cli/install/) with `--nfs-storage-url`. +Use the format `nfs:///` for the NFS storage URL. To support **NFS v4**, you can now specify additional options when running [docker/dtr install](/reference/dtr/2.6/cli/install/) with `--nfs-storage-url`. When joining replicas to a DTR cluster, the replicas will pick up your storage configuration, so you will not need to specify it again. ### Reconfigure DTR to use NFS -When upgrading from a previous version of DTR that is already using -NFS, you can continue using the same configurations. To support **NFS v4**, additional NFS reconfiguration options have been added to the CLI. See [docker/dtr reconfigure](../../../../../reference/dtr/2.6/cli/reconfigure/) for more details. +To support **NFS v4**, more NFS options have been added to the CLI. See [New Features for 2.6.0 - CLI](/ee/dtr/release-notes/#260) for updates to [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/). +> When running DTR 2.5 (with experimental online garbage collection) and 2.6.0 to 2.6.3, there is an issue with [reconfiguring and restoring DTR with `--nfs-storage-url`](/ee/dtr/release-notes#version-26) which leads to erased tags. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed. To work around the `--nfs-storage-url` flag issue, manually create a storage volume. If DTR is already installed in your cluster, [reconfigure DTR](/reference/dtr/2.6/cli/reconfigure/) with the `--dtr-storage-volume` flag using your newly-created volume. +> +> See [Reconfigure Using a Local NFS Volume]( https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#reconfigureusingalocalnfsvolume) for Docker's recommended recovery strategy. 
+{: .warning} -To take advantage of the new DTR built-in support for NFS, you can -reconfigure DTR to use NFS: +#### DTR 2.6.4 + +In DTR 2.6.4, a new flag, `--storage-migrated`, [has been added to `docker/dtr reconfigure`](/reference/dtr/2.6/cli/reconfigure/) which lets you indicate the migration status of your storage data during a reconfigure. [Upgrade to 2.6.4](/reference/dtr/2.6/cli/upgrade/) and follow [Best practice for data migration in 2.6.4](/ee/dtr/admin/configure/external-storage/storage-backend-migration/#best-practice-for-data-migration) when switching storage backends. The following shows you how to reconfigure DTR using an NFSv4 volume as a storage backend: ```bash -docker run -it --rm {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} reconfigure \ - --nfs-storage-url +docker run --rm -it \ + docker/dtr:{{ page.dtr_version}} reconfigure \ + --ucp-url <ucp-url> \ + --ucp-username <ucp-username> \ + --nfs-storage-url <nfs-storage-url> \ + --async-nfs \ + --storage-migrated ``` To reconfigure DTR to stop using NFS storage, leave the `--nfs-storage-url` option @@ -63,9 +75,13 @@ docker run -it --rm {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version}} --nfs-storage-url "" ``` -If the IP address of your NFS server changes with the DNS address staying the same, you should still -reconfigure DTR to stop using NFS storage, and then add it back again.
- ## Where to go next +- [Switch storage backends](storage-backend-migration.md) +- [Create a backup](/ee/dtr/admin/disaster-recovery/create-a-backup/) +- [Restore from a backup](/ee/dtr/admin/disaster-recovery/restore-from-backup/) - [Configure where images are stored](index.md) +- CLI reference pages + - [docker/dtr install](/reference/dtr/2.6/cli/install/) + - [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/) + - [docker/dtr restore](/reference/dtr/2.6/cli/restore/) diff --git a/ee/dtr/admin/configure/external-storage/s3.md b/ee/dtr/admin/configure/external-storage/s3.md index da9a6011d0..a47ebea055 100644 --- a/ee/dtr/admin/configure/external-storage/s3.md +++ b/ee/dtr/admin/configure/external-storage/s3.md @@ -64,14 +64,12 @@ Here's an example of a policy like that: ``` - - ## Configure DTR Once you've created a bucket and user, you can configure DTR to use it. -Navigate to the **DTR web UI**, go to **Settings**, and choose **Storage**. +In your browser, navigate to `https:// Storage**. -![](../../../images/s3-1.png){: .with-border} +![](../../../images/configure-external-storage-2.png){: .with-border} Select the **S3** option, and fill-in the information about the bucket and user. @@ -133,3 +131,24 @@ DTR supports the following S3 regions: | us-gov-west-1 | | ca-central-1 | +## Update your S3 settings on the web interface + +When running 2.5.x (with experimental garbage collection) or 2.6.0-2.6.4, there is an issue with [changing your S3 settings on the web interface](/ee/dtr/release-notes#version-26) which leads to erased metadata. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed. + +## Restore DTR with S3 + +To [restore DTR using your previously configured S3 settings](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretocloudstorage), use `docker/dtr restore` with `--dtr-use-default-storage` to keep your metadata. 
+ +## Where to go next + +- [Create a backup](/ee/dtr/admin/disaster-recovery/create-a-backup/) +- [Restore from a backup](/ee/dtr/admin/disaster-recovery/restore-from-backup/) +- [Configure where images are stored](index.md) +- CLI reference pages + - [docker/dtr install](/reference/dtr/2.6/cli/install/) + - [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/) + - [docker/dtr restore](/reference/dtr/2.6/cli/restore/) + + + + diff --git a/ee/dtr/admin/configure/external-storage/storage-backend-migration.md b/ee/dtr/admin/configure/external-storage/storage-backend-migration.md new file mode 100644 index 0000000000..6d883cc109 --- /dev/null +++ b/ee/dtr/admin/configure/external-storage/storage-backend-migration.md @@ -0,0 +1,68 @@ +--- +title: Switch storage backends +description: Storage backend migration for Docker Trusted Registry +keywords: dtr, storage drivers, local volume, NFS, Azure, S3, +--- + +Starting in DTR 2.6, switching storage backends initializes a new metadata store and erases your existing tags. This helps facilitate online garbage collection, which has been introduced in 2.5 as an experimental feature. In earlier versions, DTR would subsequently start a `tagmigration` job to rebuild tag metadata from the file layout in the image layer store. This job has been discontinued for DTR 2.5.x (with garbage collection) and DTR 2.6, as your storage backend could get out of sync with your DTR metadata, like your manifests and existing repositories. As best practice, DTR storage backends and metadata should always be moved, backed up, and restored together. + +## DTR 2.6.4 and above + +In DTR 2.6.4, a new flag, `--storage-migrated`, [has been added to `docker/dtr reconfigure`](/reference/dtr/2.6/cli/reconfigure/) which lets you indicate the migration status of your storage data during a reconfigure. 
If you are not worried about losing your existing tags, you can skip the recommended steps below and [perform a reconfigure](/reference/dtr/2.6/cli/reconfigure/). + +### Best practice for data migration + +Docker recommends the following steps for your storage backend and metadata migration: + +1. Disable garbage collection by selecting "Never" under **System > Garbage Collection**, so blobs referenced in the backup that you create continue to exist. See [Garbage collection](/ee/dtr/admin/configure/garbage-collection/) for more details. Make sure to keep it disabled while you're performing the metadata backup and migrating your storage data. + + ![](/ee/dtr/images/garbage-collection-0.png){: .img-fluid .with-border} + +2. [Back up your existing metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata). See [docker/dtr backup](/reference/dtr/2.6/cli/backup/) for CLI command description and options. + +3. Migrate the contents of your current storage backend to the new one you are switching to. For example, upload your current storage data to your new NFS server. + +4. [Restore DTR from your backup](/ee/dtr/admin/disaster-recovery/restore-from-backup/) and specify your new storage backend. See [docker/dtr destroy](/reference/dtr/2.6/cli/destroy/) and [docker/dtr restore](/reference/dtr/2.6/cli/restore/) for CLI command descriptions and options. + +5. With DTR restored from your backup and your storage data migrated to your new backend, garbage collect any dangling blobs using the following API request: + + ```bash + curl -u <username>:$TOKEN -X POST "https://<dtr-url>/api/v0/jobs" -H "accept: application/json" -H "content-type: application/json" -d "{ \"action\": \"onlinegc_blobs\" }" + ``` + On success, you should get a `202 Accepted` response with a job `id` and other related details. + +This ensures any blobs which are not referenced in your previously created backup get destroyed.
+ +### Alternative option for data migration + +- If you have a long maintenance window, you can skip some steps from above and do the following: + + 1. Put DTR in "read-only" mode using the following API request: + + ```bash + curl -u <username>:$TOKEN -X POST "https://<dtr-url>/api/v0/meta/settings" -H "accept: application/json" -H "content-type: application/json" -d "{ \"readOnlyRegistry\": true }" + ``` + On success, you should get a `202 Accepted` response. + + 2. Migrate the contents of your current storage backend to the new one you are switching to. For example, upload your current storage data to your new NFS server. + + 3. [Reconfigure DTR](/reference/dtr/2.6/cli/reconfigure) while specifying the `--storage-migrated` flag to preserve your existing tags. + + +## DTR 2.6.0-2.6.4 and DTR 2.5 (with experimental garbage collection) + +Make sure to [perform a backup](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-data) before you change your storage backend when running DTR 2.5 (with online garbage collection) and 2.6.0-2.6.3. If you encounter an issue with lost tags, refer to the following resources: + * For changes to reconfigure and restore options in DTR 2.6, see [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/) and [docker/dtr restore](/reference/dtr/2.6/cli/restore). + * For Docker's recommended recovery strategies, see [DTR 2.6 lost tags after reconfiguring storage](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage). + * For NFS-specific changes, see [Use NFS](nfs.md). + * For S3-specific changes, see [Learn how to configure DTR with Amazon S3](s3.md). + +Upgrade to [DTR 2.6.4](#dtr-264-and-above) and follow [best practice for data migration](#best-practice-for-data-migration) to avoid the wiped tags issue when moving from one NFS server to another.
+ +## Where to go next + +- [Use NFS](nfs.md) +- [Use S3](s3.md) +- CLI reference pages + - [docker/dtr install](/reference/dtr/2.6/cli/install/) + - [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/) diff --git a/ee/dtr/admin/configure/set-up-vulnerability-scans.md b/ee/dtr/admin/configure/set-up-vulnerability-scans.md index f526503c79..30cbc5bd91 100644 --- a/ee/dtr/admin/configure/set-up-vulnerability-scans.md +++ b/ee/dtr/admin/configure/set-up-vulnerability-scans.md @@ -2,6 +2,8 @@ title: Set up Security Scanning in DTR description: Enable and configure Docker Security Scanning for Docker Trusted Registry. keywords: registry, scanning, security scan, vulnerability, CVE +redirect_from: + - /datacenter/dtr/2.2/guides/admin/configure/set-up-vulnerability-scans/ --- This page explains how to set up and enable Docker Security Scanning on an diff --git a/ee/dtr/admin/disaster-recovery/create-a-backup.md b/ee/dtr/admin/disaster-recovery/create-a-backup.md index 1796ec0477..75787441db 100644 --- a/ee/dtr/admin/disaster-recovery/create-a-backup.md +++ b/ee/dtr/admin/disaster-recovery/create-a-backup.md @@ -2,6 +2,7 @@ title: Create a backup description: Learn how to create a backup of Docker Trusted Registry, for disaster recovery. keywords: dtr, disaster recovery +toc_max_header: 3 --- {% assign metadata_backup_file = "dtr-metadata-backup.tar" %} @@ -43,7 +44,7 @@ command backs up the following data: ## Back up DTR data -To create a backup of DTR you need to: +To create a backup of DTR, you need to: 1. Back up image content 2. Back up DTR metadata @@ -53,22 +54,64 @@ restore. 
If you have not previously performed a backup, the web interface displays: ![](/ee/dtr/images/backup-warning.png) +#### Find your replica ID + +Since you need your DTR replica ID during a backup, the following covers a few ways for you to determine your replica ID: + +##### UCP web interface + +You can find the list of replicas by navigating to **Shared Resources > Stacks** or **Swarm > Volumes** (when using [swarm mode](/engine/swarm/)) on the UCP web interface. + +##### UCP client bundle + +From a terminal [using a UCP client bundle](/ee/ucp/user-access/cli/), run: + +{% raw %} +```bash +docker ps --format "{{.Names}}" | grep dtr + +# The list of DTR containers with <node>/<component>-<replica_id>, e.g. +# node-1/dtr-api-a1640e1c15b6 +``` +{% endraw %} + + +##### SSH access + +Another way to determine the replica ID is to log into a DTR node using SSH and run the following: + +{% raw %} +```bash +REPLICA_ID=$(docker ps --format '{{.Names}}' -f name=dtr-rethink | cut -f 3 -d '-') \ +&& echo $REPLICA_ID +``` +{% endraw %} + ### Back up image content Since you can configure the storage backend that DTR uses to store images, -the way you backup images depends on the storage backend you're using. +the way you back up images depends on the storage backend you're using.
If you've configured DTR to store images on the local file system or NFS mount, -you can backup the images by using ssh to log into a node where DTR is running, -and creating a tar archive of the [dtr-registry volume](../../architecture.md): +you can back up the images by using SSH to log into a DTR node, +and creating a `tar` archive of the [dtr-registry volume](../../architecture.md): + +#### Example backup command + +##### Local images {% raw %} ```none -sudo tar -cf {{ image_backup_file }} \ --C /var/lib/docker/volumes/ dtr-registry- +sudo tar -cf dtr-image-backup-$(date +%Y%m%d-%H_%M_%S).tar \ +/var/lib/docker/volumes/dtr-registry-$(docker ps --format '{{.Names}}' -f name=dtr-rethink | cut -f 3 -d '-') ``` {% endraw %} +###### Expected output +```bash +tar: Removing leading `/' from member names +``` + If you're using a different storage backend, follow the best practices recommended for that system. @@ -76,36 +119,52 @@ recommended for that system. ### Back up DTR metadata To create a DTR backup, load your UCP client bundle, and run the following -command, replacing the placeholders for the real values: +command. 
+#### Chained commands (Linux only) + +{% raw %} ```none +DTR_VERSION=$(docker container inspect $(docker container ps -f name=dtr-registry -q) | \ + grep -m1 -Po '(?<=DTR_VERSION=)\d.\d.\d'); \ +REPLICA_ID=$(docker ps --format '{{.Names}}' -f name=dtr-rethink | cut -f 3 -d '-'); \ +read -p 'ucp-url (The UCP URL including domain and port): ' UCP_URL; \ +read -p 'ucp-username (The UCP administrator username): ' UCP_ADMIN; \ read -sp 'ucp password: ' UCP_PASSWORD; \ docker run --log-driver none -i --rm \ --env UCP_PASSWORD=$UCP_PASSWORD \ - {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} backup \ - --ucp-url \ - --ucp-insecure-tls \ - --ucp-username \ - --existing-replica-id > {{ metadata_backup_file }} + docker/dtr:$DTR_VERSION backup \ + --ucp-username $UCP_ADMIN \ + --ucp-url $UCP_URL \ + --ucp-ca "$(curl https://${UCP_URL}/ca)" \ + --existing-replica-id $REPLICA_ID > dtr-metadata-${DTR_VERSION}-backup-$(date +%Y%m%d-%H_%M_%S).tar ``` +{% endraw %} -Where: +#### UCP field prompts -* `` is the url you use to access UCP. +* `` is the URL you use to access UCP. * `` is the username of a UCP administrator. -* `` is the id of the DTR replica to backup. +* `` is the DTR replica ID to back up. -This prompts you for the UCP password, backups up the DTR metadata and saves the -result into a tar archive. You can learn more about the supported flags in -the [reference documentation](/reference/dtr/2.5/cli/backup.md). +The above chained commands run through the following tasks: +1. Sets your DTR version and replica ID. To back up +a specific replica, set the replica ID manually by modifying the +`--existing-replica-id` flag in the backup command. +2. Prompts you for your UCP URL (domain and port) and admin username. +3. Prompts you for your UCP password without saving it to your disk or printing it on the terminal. +4. Retrieves the CA certificate for your specified UCP URL. To skip TLS verification, replace the `--ucp-ca` +flag with `--ucp-insecure-tls`. 
Docker does not recommend this flag for production environments. +5. Includes DTR version and timestamp to your `tar` backup file. -By default the backup command doesn't stop the DTR replica being backed up. -This allows performing backups without affecting your users. Since the replica -is not stopped, it's possible that happen while the backup is taking place, won't -be persisted. +You can learn more about the supported flags in +the [DTR backup reference documentation](/reference/dtr/2.6/cli/backup.md). -You can use the `--offline-backup` option to stop the DTR replica while taking -the backup. If you do this, remove the replica from the load balancing pool. +By default, the backup command does not pause the DTR replica being backed up to +prevent interruptions of user access to DTR. Since the replica +is not stopped, changes that happen during the backup may not be saved. +Use the `--offline-backup` flag to stop the DTR replica during the backup procedure. If you set this flag, +remove the replica from the load balancing pool to avoid user interruption. Also, the backup contains sensitive information like private keys, so you can encrypt the backup by running: @@ -117,6 +176,7 @@ gpg --symmetric {{ metadata_backup_file }} This prompts you for a password to encrypt the backup, copies the backup file and encrypts it. + ### Test your backups To validate that the backup was correctly performed, you can print the contents @@ -151,3 +211,13 @@ gpg -d {{ metadata_backup_file }} | tar -t You can also create a backup of a UCP cluster and restore it into a new cluster. Then restore DTR on that new cluster to confirm that everything is working as expected. 
+ +## Where to go next +- [Configure your storage backend](/ee/dtr/admin/configure/external-storage/index.md) +- [Switch your storage backend](/ee/dtr/admin/configure/external-storage/storage-backend-migration.md) +- [Use NFS](/ee/dtr/admin/configure/external-storage/nfs.md) +- [Use S3](/ee/dtr/admin/configure/external-storage/s3.md) +- CLI reference pages + - [docker/dtr install](/reference/dtr/2.6/cli/install/) + - [docker/dtr reconfigure](/reference/dtr/2.6/cli/reconfigure/) + - [docker/dtr restore](/reference/dtr/2.6/cli/restore/) diff --git a/ee/dtr/admin/disaster-recovery/restore-from-backup.md b/ee/dtr/admin/disaster-recovery/restore-from-backup.md index 2c0b17d80c..2986726e80 100644 --- a/ee/dtr/admin/disaster-recovery/restore-from-backup.md +++ b/ee/dtr/admin/disaster-recovery/restore-from-backup.md @@ -59,8 +59,13 @@ the configuration created during a backup. Load your UCP client bundle, and run the following command, replacing the placeholders for the real values: -```none -read -sp 'ucp password: ' UCP_PASSWORD; \ +```bash +read -sp 'ucp password: ' UCP_PASSWORD; +``` + +This prompts you for the UCP password. Next, run the following to restore DTR from your backup. You can learn more about the supported flags in [docker/dtr restore](/reference/dtr/2.6/cli/restore). + +```bash docker run -i --rm \ --env UCP_PASSWORD=$UCP_PASSWORD \ {{ page.dtr_org }}/{{ page.dtr_repo }}:{{ page.dtr_version }} restore \ @@ -80,9 +85,18 @@ Where: * `` the id of the replica you backed up * ``the url that clients use to access DTR +#### DTR 2.5 and below + If you're using NFS as a storage backend, also include `--nfs-storage-url` as part of your restore command, otherwise DTR is restored but starts using a -local volume to persist your Docker images. +local volume to persist your Docker images. 
+ +#### DTR 2.5 (with experimental online garbage collection) and DTR 2.6.0-2.6.3 + +> When running DTR 2.5 (with experimental online garbage collection) and 2.6.0 to 2.6.3, there is an issue with [reconfiguring and restoring DTR with `--nfs-storage-url`](/ee/dtr/release-notes#version-26) which leads to erased tags. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed. To work around the `--nfs-storage-url` flag issue, manually create a storage volume on each DTR node. To [restore DTR](/reference/dtr/2.6/cli/restore/) from an existing backup, use `docker/dtr restore` with `--dtr-storage-volume` and the new volume. +> +> See [Restore to a Local NFS Volume]( https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretoalocalnfsvolume) for Docker's recommended recovery strategy. +{: .warning} ### Re-fetch the vulnerability database @@ -90,3 +104,7 @@ If you're scanning images, you now need to download the vulnerability database. After you successfully restore DTR, you can join new replicas the same way you would after a fresh installation. [Learn more](../configure/set-up-vulnerability-scans.md). + +## Where to go next + +- [docker/dtr restore](/reference/dtr/2.6/cli/restore/) diff --git a/ee/dtr/admin/install/index.md b/ee/dtr/admin/install/index.md index 67c77ad5e7..779a009003 100644 --- a/ee/dtr/admin/install/index.md +++ b/ee/dtr/admin/install/index.md @@ -54,7 +54,10 @@ information that is necessary. By default DTR is deployed with self-signed certificates, so your UCP deployment might not be able to pull images from DTR. Use the `--dtr-external-url :` optional flag while deploying -DTR, so that UCP is automatically reconfigured to trust DTR. +DTR, so that UCP is automatically reconfigured to trust DTR.
Since [HSTS (HTTP Strict-Transport-Security) +header](https://en.wikipedia.org/wiki/HTTP_Strict_Transport_Security) is included in all API responses, +make sure to specify the FQDN (Fully Qualified Domain Name) of your DTR, or your browser may refuse +to load the web interface. ## Step 4. Check that DTR is running diff --git a/ee/dtr/admin/install/uninstall.md b/ee/dtr/admin/install/uninstall.md index af15b3aed2..0e8dda9ed5 100644 --- a/ee/dtr/admin/install/uninstall.md +++ b/ee/dtr/admin/install/uninstall.md @@ -17,7 +17,7 @@ You will be prompted for the UCP URL, UCP credentials, and which replica to destroy. To see what options are available in the destroy command, check the -[destroy command reference documentation](/reference/dtr/2.5/cli/destroy.md). +[destroy command reference documentation](/reference/dtr/2.6/cli/destroy.md). ## Where to go next diff --git a/ee/dtr/admin/monitor-and-troubleshoot/troubleshoot-dtr.md b/ee/dtr/admin/monitor-and-troubleshoot/troubleshoot-dtr.md index 58d203e04f..a8318eaa20 100644 --- a/ee/dtr/admin/monitor-and-troubleshoot/troubleshoot-dtr.md +++ b/ee/dtr/admin/monitor-and-troubleshoot/troubleshoot-dtr.md @@ -67,9 +67,11 @@ On a healthy cluster the output will be `[]`. Starting in DTR 2.5.5, you can run RethinkCLI from a separate image. First, set an environment variable for your DTR replica ID: +{% raw %} ```bash REPLICA_ID=$(docker inspect -f '{{.Name}}' $(docker ps -q -f name=dtr-rethink) | cut -f 3 -d '-') ``` +{% endraw %} RethinkDB stores data in different databases that contain multiple tables. 
Run the following command to get into interactive mode and query the contents of the DB: diff --git a/ee/dtr/images/configure-external-storage-2.png b/ee/dtr/images/configure-external-storage-2.png index aa4fe15908..ec259cce88 100644 Binary files a/ee/dtr/images/configure-external-storage-2.png and b/ee/dtr/images/configure-external-storage-2.png differ diff --git a/ee/dtr/images/delegate-image-signing-1.svg b/ee/dtr/images/delegate-image-signing-1.svg deleted file mode 100644 index 73ffb9a892..0000000000 --- a/ee/dtr/images/delegate-image-signing-1.svg +++ /dev/null @@ -1,179 +0,0 @@ - - - - - -delegate-image-signing-1 -Created with Sketch. - - - - - - - - IT ops team - - - QA team - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - dev/node - - - - dev/java - - - - dev/nginx - - - - - - - diff --git a/ee/dtr/images/remoteucp-addregistry.png b/ee/dtr/images/remoteucp-addregistry.png new file mode 100644 index 0000000000..18566e06cc Binary files /dev/null and b/ee/dtr/images/remoteucp-addregistry.png differ diff --git a/ee/dtr/images/remoteucp-enablesigning.png b/ee/dtr/images/remoteucp-enablesigning.png new file mode 100644 index 0000000000..f06eb7f72b Binary files /dev/null and b/ee/dtr/images/remoteucp-enablesigning.png differ diff --git a/ee/dtr/images/remoteucp-graphic.png b/ee/dtr/images/remoteucp-graphic.png new file mode 100644 index 0000000000..fd91a5ff0a Binary files /dev/null and b/ee/dtr/images/remoteucp-graphic.png differ diff --git a/ee/dtr/images/remoteucp-signedimage.png b/ee/dtr/images/remoteucp-signedimage.png new file mode 100644 index 0000000000..381d548372 Binary files /dev/null and b/ee/dtr/images/remoteucp-signedimage.png differ diff --git a/ee/dtr/images/sign-an-image-3.png b/ee/dtr/images/sign-an-image-3.png index c8253b31a6..a5c2030356 100644 Binary files a/ee/dtr/images/sign-an-image-3.png and b/ee/dtr/images/sign-an-image-3.png differ diff --git a/ee/dtr/release-notes.md b/ee/dtr/release-notes.md index 
35fd1a9d99..362bad8d26 100644 --- a/ee/dtr/release-notes.md +++ b/ee/dtr/release-notes.md @@ -19,9 +19,92 @@ to upgrade your installation to the latest release. * [Version 2.5](#version-25) * [Version 2.4](#version-24) - # Version 2.6 +## 2.6.6 +(2019-5-6) + +### Security + +* Refer to [DTR image vulnerabilities](https://success.docker.com/article/dtr-image-vulnerabilities) for details regarding actions to be taken, timeline, and any status updates/issues/recommendations. + +### Enhancements + +* DTR now supports an option to keep your tag metadata when switching storage backends via the API. This is similar to the `--storage-migrated` option when performing an NFS reconfiguration via `docker run docker/dtr reconfigure --nfs-url ...`. (docker/dhe-deploy#10246) + - To use this option, first write your current storage settings to a JSON file via `curl ... /api/v0/admin/settings/registry > storage.json`. + - Next, add `keep_metadata: true` as a top-level key in the JSON you just created and modify it to contain your new storage settings. + - Finally, update your Registry settings with your modified JSON file via `curl -X PUT .../api/v0/admin/settings/registry -d @storage.json`. + +### Bug Fixes + +* Fixed an issue where replica version was inferred from DTR volume labels. (docker/dhe-deploy#10266) + +### Security +* Bumped the Golang version for DTR to 1.12.4. (docker/dhe-deploy#10290) +* Bumped the Alpine version of the base image to 3.9. (docker/dhe-deploy#10290) + +### Known issues + +* Docker Engine Enterprise Edition (Docker EE) Upgrade + * There are [important changes to the upgrade process](/ee/upgrade) that, if not correctly followed, can have impact on the availability of applications running on the Swarm during upgrades. These constraints impact any upgrades coming from any version before `18.09` to version `18.09` or greater. For DTR-specific changes, see [2.5 to 2.6 upgrade](/ee/dtr/admin/upgrade/#25-to-26-upgrade). 
+* Web Interface + * Poll mirroring for Docker plugins such as `docker/imagefs` is currently broken. (docker/dhe-deploy #9490) + * When viewing the details of a scanned image tag, the header may display a different vulnerability count from the layer details. (docker/dhe-deploy #9474) + * In order to set a tag limit for pruning purposes, immutability must be turned off for a repository. This limitation is not clear in the **Repository Settings** view. (docker/dhe-deploy #9554) +* Webhooks + * When configured for "Image promoted from repository" events, a webhook notification is triggered twice during an image promotion when scanning is enabled on a repository. (docker/dhe-deploy #9685) + * HTTPS webhooks do not go through HTTPS proxy when configured. (docker/dhe-deploy #9492) +* System + * When upgrading from `2.5` to `2.6`, the system will run a `metadatastoremigration` job after a successful upgrade. This is necessary for online garbage collection. If the three system attempts fail, you will have to retrigger the `metadatastoremigration` job manually. [Learn about manual metadata store migration](/ee/dtr/admin/upgrade/#25-to-26-upgrade). + +## 2.6.5 +(2019-4-11) + +### Bug Fixes +* Fixed a bug where the web interface was not rendering for non-admin users. +* Removed `Users` tab from the side navigation [#10222](https://github.com/docker/dhe-deploy/pull/10222) + +### Known issues + +* Docker Engine Enterprise Edition (Docker EE) Upgrade + * There are [important changes to the upgrade process](/ee/upgrade) that, if not correctly followed, can have impact on the availability of applications running on the Swarm during upgrades. These constraints impact any upgrades coming from any version before `18.09` to version `18.09` or greater. For DTR-specific changes, see [2.5 to 2.6 upgrade](/ee/dtr/admin/upgrade/#25-to-26-upgrade). +* Web Interface + * Poll mirroring for Docker plugins such as `docker/imagefs` is currently broken. 
(docker/dhe-deploy #9490) + * When viewing the details of a scanned image tag, the header may display a different vulnerability count from the layer details. (docker/dhe-deploy #9474) + * In order to set a tag limit for pruning purposes, immutability must be turned off for a repository. This limitation is not clear in the **Repository Settings** view. (docker/dhe-deploy #9554) +* Webhooks + * When configured for "Image promoted from repository" events, a webhook notification is triggered twice during an image promotion when scanning is enabled on a repository. (docker/dhe-deploy #9685) + * HTTPS webhooks do not go through HTTPS proxy when configured. (docker/dhe-deploy #9492) +* System + * When upgrading from `2.5` to `2.6`, the system will run a `metadatastoremigration` job after a successful upgrade. This is necessary for online garbage collection. If the three system attempts fail, you will have to retrigger the `metadatastoremigration` job manually. [Learn about manual metadata store migration](/ee/dtr/admin/upgrade/#25-to-26-upgrade). + +## 2.6.4 +(2019-3-28) + +### Enhancements + +* Added `--storage-migrated` option to reconfigure with migrated content when moving content to a new NFS URL. (ENGDTR-794) +* Added a job log status filter which allows users to exclude jobs that are not currently ***running***. (docker/dhe-deploy #10077) + +### Bug Fixes + +* If you have a repository in DTR 2.4 with manifest lists enabled, `docker pull` would fail on images that have been pushed to the repository after you upgrade to 2.5 and opt into garbage collection. This also applied when upgrading from 2.5 to 2.6. The issue has been fixed in DTR 2.6.4. (ENGDTR-330 and docker/dhe-deploy #10105) + +### Known issues + +* Docker Engine Enterprise Edition (Docker EE) Upgrade + * There are [important changes to the upgrade process](/ee/upgrade) that, if not correctly followed, can have impact on the availability of applications running on the Swarm during upgrades. 
These constraints impact any upgrades coming from any version before `18.09` to version `18.09` or greater. For DTR-specific changes, see [2.5 to 2.6 upgrade](/ee/dtr/admin/upgrade/#25-to-26-upgrade). + +* Web Interface + * Poll mirroring for Docker plugins such as `docker/imagefs` is currently broken. (docker/dhe-deploy #9490) + * When viewing the details of a scanned image tag, the header may display a different vulnerability count from the layer details. (docker/dhe-deploy #9474) + * In order to set a tag limit for pruning purposes, immutability must be turned off for a repository. This limitation is not clear in the **Repository Settings** view. (docker/dhe-deploy #9554) +* Webhooks + * When configured for "Image promoted from repository" events, a webhook notification is triggered twice during an image promotion when scanning is enabled on a repository. (docker/dhe-deploy #9685) + * HTTPS webhooks do not go through HTTPS proxy when configured. (docker/dhe-deploy #9492) +* System + * When upgrading from `2.5` to `2.6`, the system will run a `metadatastoremigration` job after a successful upgrade. This is necessary for online garbage collection. If the three system attempts fail, you will have to retrigger the `metadatastoremigration` job manually. [Learn about manual metadata store migration](/ee/dtr/admin/upgrade/#25-to-26-upgrade). + ## 2.6.3 (2019-2-28) @@ -43,6 +126,10 @@ to upgrade your installation to the latest release. * Poll mirroring for Docker plugins such as `docker/imagefs` is currently broken. (docker/dhe-deploy #9490) * When viewing the details of a scanned image tag, the header may display a different vulnerability count from the layer details. (docker/dhe-deploy #9474) * In order to set a tag limit for pruning purposes, immutability must be turned off for a repository. This limitation is not clear in the **Repository Settings** view. 
(docker/dhe-deploy #9554) + * Changing your S3 settings through the web interface will lead to erased metadata (ENGDTR-793). See [Restore to Cloud Storage](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretocloudstorage) for Docker's recommended recovery strategy. + +* CLI + * When reconfiguring and restoring DTR, specifying `--nfs-storage-url` will assume you are switching to a fresh storage backend and will wipe your existing tags (ENGDTR-794). See [Reconfigure Using a Local NFS Volume](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#reconfigureusingalocalnfsvolume) and [Restore to a Local NFS Volume](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretoalocalnfsvolume) for Docker's recommended recovery strategies. * Webhooks * When configured for "Image promoted from repository" events, a webhook notification is triggered twice during an image promotion when scanning is enabled on a repository. (docker/dhe-deploy #9685) @@ -69,6 +156,10 @@ to upgrade your installation to the latest release. * Poll mirroring for Docker plugins such as `docker/imagefs` is currently broken. (docker/dhe-deploy #9490) * When viewing the details of a scanned image tag, the header may display a different vulnerability count from the layer details. (docker/dhe-deploy #9474) * In order to set a tag limit for pruning purposes, immutability must be turned off for a repository. This limitation is not clear in the **Repository Settings** view. (docker/dhe-deploy #9554) + * Changing your S3 settings through the web interface will lead to erased metadata (ENGDTR-793). See [Restore to Cloud Storage](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretocloudstorage) for Docker's recommended recovery strategy. 
+ +* CLI + * When reconfiguring and restoring DTR, specifying `--nfs-storage-url` will assume you are switching to a fresh storage backend and will wipe your existing tags (ENGDTR-794). See [Reconfigure Using a Local NFS Volume](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#reconfigureusingalocalnfsvolume) and [Restore to a Local NFS Volume](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretoalocalnfsvolume) for Docker's recommended recovery strategies. * Webhooks * When configured for "Image promoted from repository" events, a webhook notification is triggered twice during an image promotion when scanning is enabled on a repository. (docker/dhe-deploy #9685) @@ -100,6 +191,10 @@ to upgrade your installation to the latest release. * Poll mirroring for Docker plugins such as `docker/imagefs` is currently broken. (docker/dhe-deploy #9490) * When viewing the details of a scanned image tag, the header may display a different vulnerability count from the layer details. (docker/dhe-deploy #9474) * In order to set a tag limit for pruning purposes, immutability must be turned off for a repository. This limitation is not clear in the **Repository Settings** view. (docker/dhe-deploy #9554) + * Changing your S3 settings through the web interface will lead to erased metadata (ENGDTR-793). See [Restore to Cloud Storage](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretocloudstorage) for Docker's recommended recovery strategy. + +* CLI + * When reconfiguring and restoring DTR, specifying `--nfs-storage-url` will assume you are switching to a fresh storage backend and will wipe your existing tags (ENGDTR-794). 
See [Reconfigure Using a Local NFS Volume](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#reconfigureusingalocalnfsvolume) and [Restore to a Local NFS Volume](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretoalocalnfsvolume) for Docker's recommended recovery strategies. * Webhooks * When configured for "Image promoted from repository" events, a webhook notification is triggered twice during an image promotion when scanning is enabled on a repository. (docker/dhe-deploy #9685) @@ -150,6 +245,10 @@ to upgrade your installation to the latest release. * Poll mirroring for Docker plugins such as `docker/imagefs` is currently broken. (docker/dhe-deploy #9490) * When viewing the details of a scanned image tag, the header may display a different vulnerability count from the layer details. (docker/dhe-deploy #9474) * In order to set a tag limit for pruning purposes, immutability must be turned off for a repository. This limitation is not clear in the **Repository Settings** view. (docker/dhe-deploy #9554) + * Changing your S3 settings through the web interface will lead to erased metadata (ENGDTR-793). See [Restore to Cloud Storage](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretocloudstorage) for Docker's recommended recovery strategy. + +* CLI + * When reconfiguring and restoring DTR, specifying `--nfs-storage-url` will assume you are switching to a fresh storage backend and will wipe your existing tags (ENGDTR-794). See [Reconfigure Using a Local NFS Volume](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#reconfigureusingalocalnfsvolume) and [Restore to a Local NFS Volume](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretoalocalnfsvolume) for Docker's recommended recovery strategies. 
* Webhooks * When configured for "Image promoted from repository" events, a webhook notification is triggered twice during an image promotion when scanning is enabled on a repository. (docker/dhe-deploy #9685) @@ -170,6 +269,88 @@ to upgrade your installation to the latest release. # Version 2.5 + +> **Important DTR Upgrade Information** +> If you have manifest lists enabled on any of your repositories: +> +> Upgrade path from 2.5.x to 2.6: Upgrade directly to 2.6.4. + +## 2.5.11 + +(2019-05-06) + +### Security + +* Bumped the Golang version for DTR to 1.12.4. (docker/dhe-deploy #10301) +* Bumped the Alpine version of the base image to 3.9. (docker/dhe-deploy #10301) +* Bumped Python dependencies to address vulnerabilities. (docker/dhe-deploy #10308 and #10311) + +### Bug Fixes + +* Fixed an issue where read / write permissions were used when copying files into containers. (docker/dhe-deploy #10207) +* Fixed an issue where non-admin users could not access their repositories from the Repositories page on the web interface. (docker/dhe-deploy #10294) + +### Known Issues + +* Web Interface + * The web interface shows "This repository has no tags" in repositories where tags + have long names. As a workaround, reduce the length of the name for the + repository and tag. + * When deleting a repository with signed images, the DTR web interface no longer + shows instructions on how to delete trust data. + * There's no web interface support to update mirroring policies when rotating the TLS + certificates used by DTR. Use the API instead. + * The web interface for promotion policies is currently broken if you have a large number + of repositories. + * Clicking "Save & Apply" on a promotion policy doesn't work. +* Webhooks + * There is no webhook event for when an image is pulled. + * HTTPS webhooks do not go through HTTPS proxy when configured. 
(docker/dhe-deploy #9492) + * When configured for "Image promoted from repository" events, a webhook notification will be triggered twice during an image promotion when scanning is enabled on a repository. (docker/dhe-deploy #9685) +* Online garbage collection + * The events API won't report events when tags and manifests are deleted. + * The events API won't report blobs deleted by the garbage collection job. +* Docker EE Advanced features + * Scanning any new push after metadatastore migration will not yet work. + * Pushes to repos with promotion policies (repo as source) are broken when an + image has a layer over 100MB. + * On upgrade the scanningstore container may restart with this error message: + FATAL: database files are incompatible with server + +## 2.5.10 + +(2019-3-28) + +### Bug Fixes + +* If you have a repository in DTR 2.4 with manifest lists enabled, `docker pull` used to fail on images that were pushed to the repository after you upgraded to 2.5 and opted into garbage collection. This has been fixed in 2.5.10. (docker/dhe-deploy#10106) + +### Known Issues +* Web Interface + * The web interface shows "This repository has no tags" in repositories where tags + have long names. As a workaround, reduce the length of the name for the + repository and tag. + * When deleting a repository with signed images, the DTR web interface no longer + shows instructions on how to delete trust data. + * There's no web interface support to update mirroring policies when rotating the TLS + certificates used by DTR. Use the API instead. + * The web interface for promotion policies is currently broken if you have a large number + of repositories. + * Clicking "Save & Apply" on a promotion policy doesn't work. +* Webhooks + * There is no webhook event for when an image is pulled. + * HTTPS webhooks do not go through HTTPS proxy when configured. 
(docker/dhe-deploy #9492) + * When configured for "Image promoted from repository" events, a webhook notification will be triggered twice during an image promotion when scanning is enabled on a repository. (docker/dhe-deploy #9685) +* Online garbage collection + * The events API won't report events when tags and manifests are deleted. + * The events API won't report blobs deleted by the garbage collection job. +* Docker EE Advanced features + * Scanning any new push after metadatastore migration will not yet work. + * Pushes to repos with promotion policies (repo as source) are broken when an + image has a layer over 100MB. + * On upgrade the scanningstore container may restart with this error message: + FATAL: database files are incompatible with server + ## 2.5.9 (2019-2-28) @@ -600,6 +781,29 @@ specify `--log-protocol`. # Version 2.4 +> **Important DTR Upgrade Information** +> If you have manifest lists enabled on any of your repositories: +> +> Upgrade path from 2.4.x to 2.5: Do not opt into garbage collection, or directly upgrade to 2.5.10 if you need to opt into > garbage collection. +> Upgrade path from 2.5.x to 2.6: Upgrade directly to 2.6.4. + +## 2.4.12 + +(2019-05-06) + +### Security + +* Bumped the Golang version for DTR to 1.12.4. [docker/dhe-deploy #10303](https://github.com/docker/dhe-deploy/pull/10303) +* Bumped Python dependencies to address vulnerabilities. [docker/dhe-deploy#10309](https://github.com/docker/dhe-deploy/pull/10309) + +## 2.4.11 + +(2019-4-11) + +### Changelog + +* Bumped the Golang version that is used to build DTR to version 1.11.5. 
[docker/dhe-deploy#10155](https://github.com/docker/dhe-deploy/pull/10155) + ## 2.4.10 (2019-2-28) diff --git a/ee/dtr/user/access-dtr/configure-your-notary-client.md b/ee/dtr/user/access-dtr/configure-your-notary-client.md deleted file mode 100644 index e2880b9cb3..0000000000 --- a/ee/dtr/user/access-dtr/configure-your-notary-client.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: Configure your Notary client -description: Learn how to configure your Notary client to push and pull images from Docker Trusted Registry. -keywords: registry, notary, trust ---- - -The Docker CLI client makes it easy to sign images but to streamline that -process it generates a set of private and public keys that are not tied -to your UCP account. This means that you'll be able to push and sign images to -DTR, but UCP won't trust those images since it doesn't know anything about -the keys you're using. - -So before signing and pushing images to DTR you should: - -* Configure the Notary CLI client -* Import your UCP private keys to the Notary client - -This allows you to start signing images with the private keys in your UCP -client bundle, that UCP can trace back to your user account. - -## System requirements - -The version of Notary you install, depends on the version of the Docker CLI -you're using: - -* Docker CLI 17.08 or older, use Notary 0.4.3. -* Docker CLI 17.09 or newer, use Notary 0.6.0. - -## Download the Notary CLI client - -If you're using Docker Desktop for Mac or Docker Desktop for Windows, you already have the -`notary` command installed. - -If you're running Docker on a Linux distribution, you can [download -Notary from Github](https://github.com/docker/notary/releases). As an example: - -```bash -# Get the latest binary -curl -L -o notary - -# Make it executable -chmod +x notary - -# Move it to a location in your path. Use the -Z option if you're using SELinux. 
-sudo mv -Z notary /usr/bin/ -``` - -## Configure the Notary CLI client - -Before you use the Notary CLI client, you need to configure it to make it -talk with the Notary server that's part of DTR. - -There's two ways to do this, either by passing flags to the notary command, -or using a configuration file. - -### With flags - -Run the Notary command with: - -```bash -notary --server https:// --trustDir ~/.docker/trust --tlscacert --help -``` - -Here's what the flags mean: - -| Flag | Purpose | -|:--------------|:----------------------------------------------------------------------------------------------------------------------------------| -| `--server` | The Notary server to query | -| `--trustDir` | Path to the local directory where trust metadata will be stored | -| `--tlscacert` | Path to the DTR CA certificate. If you've configured your system to trust the DTR CA certificate, you don't need to use this flag | - -To avoid having to type all the flags when using the command, you can set an -alias: - - - -
    -
    -``` -alias notary="notary --server https:// --trustDir ~/.docker/trust --tlscacert " -``` -
    -
    -
    -``` -set-alias notary "notary --server https:// --trustDir ~/.docker/trust --tlscacert " -``` -
    -
    -
    - -### With a configuration file - -You can also configure Notary by creating a `~/.notary/config.json` file with -the following content: - -```json -{ - "trust_dir" : "~/.docker/trust", - "remote_server": { - "url": "https://:", - "root_ca": "" - } -} -``` - -To validate your configuration, try running the `notary list` command on a -DTR repository that already has signed images: - -```bash -notary list // -``` - -The command should print a list of digests for each signed image on the -repository. - -## Import your UCP key - -The last step in configuring the Notary CLI client is to import the private -key of your UCP client bundle. -[Get a new client bundle if you don't have one yet](/datacenter/ucp/2.2/guides/user/access-ucp/cli-based-access.md). - -Import the private key in your UCP bundle into the Notary CLI client: - -```bash -notary key import -``` - -The private key is copied to `~/.docker/trust`, and you'll be prompted for a -password to encrypt it. - -You can validate what keys Notary knows about by running: - -```bash -notary key list -``` - -The key you've imported should be listed with the role `delegation`. diff --git a/ee/dtr/user/manage-images/sign-images/delegate-image-signing.md b/ee/dtr/user/manage-images/sign-images/delegate-image-signing.md deleted file mode 100644 index ae4813fcfa..0000000000 --- a/ee/dtr/user/manage-images/sign-images/delegate-image-signing.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: Delegate image signing -description: Learn how to grant permission for others to sign images in Docker Trusted Registry. -keywords: registry, sign, trust ---- - -Instead of signing all the images yourself, you can delegate that task -to other users. - -A typical workflow looks like this: - -1. A repository owner creates a repository in DTR, and initializes the trust -metadata for that repository -3. Team members download a UCP client bundle and share their public key -certificate with the repository owner -4. 
The repository owner delegates signing to the team members -5. Team members can sign images using the private keys in their UCP client -bundles - -In this example, the IT ops team creates and initializes trust for the -`dev/nginx`. Then they allow users in the QA team to push and sign images in -that repository. - -![teams](../../../images/delegate-image-signing-1.svg) - -## Create a repository and initialize trust - -A member of the IT ops team starts by configuring their -[Notary CLI client](../../access-dtr/configure-your-notary-client.md). - -Then they create the `dev/nginx` repository, -[initialize the trust metadata](index.md) for that repository, and grant -write access to members of the QA team, so that they can push images to that -repository. - -## Ask for the public key certificates - -The member of the IT ops team then asks the QA team for their public key -certificate files that are part of their UCP client bundle. - -If they don't have a UCP client bundle, -[they can download a new one](/ee/ucp/user-access/cli.md). - -## Delegate image signing - -When delegating trust, you associate a public key certificate with a role name. -UCP requires that you delegate trust to two different roles: - -* `targets/releases` -* `targets/`, where `` is the UCP team the user belongs to - -In this example we'll delegate trust to `targets/releases` and `targets/qa`: - -```bash -# Delegate trust, and add that public key with the role targets/releases -notary delegation add dtr.example.org/dev/nginx targets/releases \ - --all-paths --publish - -# Delegate trust, and add that public key with the role targets/admin -notary delegation add dtr.example.org/dev/nginx targets/qa \ - --all-paths --publish -``` - -Now members from the QA team just have to [configure their Notary CLI client -with UCP private keys](../../access-dtr/configure-your-notary-client.md) -to be able to [push and sign images](index.md) into the `dev/nginx` repository. 
- - -## Where to go next - -- [Manage trusted repositories](manage-trusted-repositories.md) diff --git a/ee/dtr/user/manage-images/sign-images/index.md b/ee/dtr/user/manage-images/sign-images/index.md index c0166a9e76..9fa9511695 100644 --- a/ee/dtr/user/manage-images/sign-images/index.md +++ b/ee/dtr/user/manage-images/sign-images/index.md @@ -2,175 +2,244 @@ title: Sign an image description: Learn how to sign the images you push to Docker Trusted Registry. keywords: registry, sign, trust +redirect_from: +- /ee/dtr/user/manage-images/sign-images/delegate-image-signing/ +- /ee/dtr/user/manage-images/sign-images/manage-trusted-repositories/ --- -By default, when you push an image to DTR, the Docker CLI client doesn't -sign the image. +2 Key components of the Docker Trusted Registry is the Notary Server and Notary +Signer. These 2 containers give us the required components to use Docker Content +Trust right out of the box. [Docker Content +Trust](/engine/security/trust/content_trust/) allows us to sign image tags, +therefore whoever pulls the image can validate that they are getting the image +you create, or a forged one. + +As part of Docker Trusted Registry both the Notary server and the Registry +server are accessed through a front end Proxy, with both components sharing the +UCP's RBAC Engine. Therefore no additional configuration of the Docker Client +is required to use trust. + +Docker Content Trust is integrated into the Docker CLI, allowing you to +configure repositories, add signers and sign images all through the `$ docker +trust` command. ![image without signature](../../../images/sign-an-image-1.svg) -You can configure the Docker CLI client to sign the images you push to DTR. -This allows whoever pulls your image to validate if they are getting the image -you created, or a forged one. - -To sign an image, you can run: - -```bash -export DOCKER_CONTENT_TRUST=1 -docker push //: -``` - -This pushes the image to DTR and creates trust metadata. 
It also creates -public and private key pairs to sign the trust metadata, and pushes that metadata -to the Notary Server internal to DTR. - -![image with signature](../../../images/sign-an-image-2.svg) - - ## Sign images that UCP can trust -With the command above you'll be able to sign your DTR images, but UCP won't -trust them because it can't tie the private key you're using to sign the images -to your UCP account. +UCP has a feature which will prevent [untrusted +images](/ee/ucp/admin/configure/run-only-the-images-you-trust/) from being +deployed on the cluster. To use this feature, we first need to upload and sign +images into DTR. To tie the signed images back to UCP, we will actually sign the +images with private keys of UCP users. Inside of a UCP Client bundle the +`key.pem` can be used as a User's private key, with the `cert.pem` being a public +key within an x509 certificate. To sign images in a way that UCP trusts them, you need to: -* Configure your Notary client -* Initialize trust metadata for the repository -* Delegate signing to the keys in your UCP client bundle +1. Download a Client Bundle for a User you want to use to sign the images. +2. Load the private key of the User into your workstation's trust store. +3. Initialize trust metadata for the repository. +4. Delegate signing for that repository to the UCP User. +5. Sign the Image. -In this example we're going to pull an NGINX image from Docker Hub, -re-tag it as `dtr.example.org/dev/nginx:1`, push the image to DTR and sign it -in a way that is trusted by UCP. If you manage multiple repositories, you'll -have to do the same procedure for every one of them. +In this example we're going to pull an nginx image from the Docker Hub, re-tag it +as `dtr.example.com/dev/nginx:1`, push the image to DTR and sign it in a way +that is trusted by UCP. If you manage multiple repositories, you'll have to do +the same procedure for each repository. 
-### Configure your Notary client +### Import a UCP User's Private Key -Start by [configuring your Notary client](../../access-dtr/configure-your-notary-client.md). -This ensures the Docker an Notary CLI clients know about your UCP private keys. - -### Initialize the trust metadata - -Then you need to initialize the trust metadata for the new repository, and -the easiest way to do it is by pushing an image to that repository. Navigate to -the **DTR web UI**, and create a repository for your image. -In this example we've created the `dev/nginx` repository. - -From the Docker CLI client, pull an NGINX image from Docker Hub, -re-tag it, sign and push it to DTR. +Once you have downloaded and extracted a UCP User's client bundle into your local +directory, you need to load the Private key into the local Docker trust store +(`~/.docker/trust`). The name used here is purely metadata to help keep track of +which keys you have imported. ```bash -# Pull NGINX from Docker Hub -docker pull nginx:latest - -# Re-tag NGINX -docker tag nginx:latest dtr.example.org/dev/nginx:1 - -# Log into DTR -docker login dtr.example.org - -# Sign and push the image to DTR -export DOCKER_CONTENT_TRUST=1 -docker push dtr.example.org/dev/nginx:1 +$ docker trust key load --name jeff key.pem +Loading key from "key.pem"... +Enter passphrase for new jeff key with ID a453196: +Repeat passphrase for new jeff key with ID a453196: +Successfully imported key from key.pem ``` -This pushes the image to DTR and initializes the trust metadata for that -repository. +### Initialize the trust metadata and add the Public Key + +Next, we need to initiate trust metadata for a DTR repository. If you have not +done so already, navigate to the **DTR web UI**, and create a repository for +your image. In this example we've created the `prod/nginx` repository. + +As part of initiating the repository, we will add the public key of the UCP User +as a signer. You will be asked for a number of passphrases to protect the keys. 
+Make a note of these passphrases, and see [Managing Delegations in a Notary Server](/engine/security/trust/trust_delegation/#managing-delegations-in-a-notary-server) +to learn more about managing keys. + + +```bash +$ docker trust signer add --key cert.pem jeff dtr.example.com/prod/nginx +Adding signer "jeff" to dtr.example.com/prod/nginx... +Initializing signed repository for dtr.example.com/prod/nginx... +Enter passphrase for root key with ID 4a72d81: +Enter passphrase for new repository key with ID e0d15a2: +Repeat passphrase for new repository key with ID e0d15a2: +Successfully initialized "dtr.example.com/prod/nginx" +Successfully added signer: jeff to dtr.example.com/prod/nginx +``` + +We can inspect the trust metadata of the repository to make sure the User has +been added correctly. + +```bash +$ docker trust inspect --pretty dtr.example.com/prod/nginx + +No signatures for dtr.example.com/prod/nginx + +List of signers and their keys for dtr.example.com/prod/nginx + +SIGNER KEYS +jeff 927f30366699 + +Administrative keys for dtr.example.com/prod/nginx + + Repository Key: e0d15a24b741ab049470298734397afbea539400510cb30d3b996540b4a2506b + Root Key: b74854cb27cc25220ede4b08028967d1c6e297a759a6939dfef1ea72fbdd7b9a +``` + +### Sign the Image + +Finally, we will sign an image tag. These steps download the Image from the +Docker Hub, retag the Image to the DTR repository, push the image up to DTR, as +well as signing the tag with the UCP User's keys. 
+ +```bash +$ docker pull nginx:latest + +$ docker tag nginx:latest dtr.example.com/prod/nginx:1 + +$ docker trust sign dtr.example.com/prod/nginx:1 +Signing and pushing trust data for local image dtr.example.com/prod/nginx:1, may overwrite remote trust data +The push refers to repository [dtr.example.com/prod/nginx] +6b5e2ed60418: Pushed +92c15149e23b: Pushed +0a07e81f5da3: Pushed +1: digest: sha256:5b49c8e2c890fbb0a35f6050ed3c5109c5bb47b9e774264f4f3aa85bb69e2033 size: 948 +Signing and pushing trust metadata +Enter passphrase for jeff key with ID 927f303: +Successfully signed dtr.example.com/prod/nginx:1 +``` + +We can inspect the trust metadata again to make sure the image tag has been +signed successfully. + +```bash +$ docker trust inspect --pretty dtr.example.com/prod/nginx:1 + +Signatures for dtr.example.com/prod/nginx:1 + +SIGNED TAG DIGEST SIGNERS +1 5b49c8e2c890fbb0a35f6050ed3c5109c5bb47b9e774264f4f3aa85bb69e2033 jeff + +List of signers and their keys for dtr.example.com/prod/nginx:1 + +SIGNER KEYS +jeff 927f30366699 + +Administrative keys for dtr.example.com/prod/nginx:1 + + Repository Key: e0d15a24b741ab049470298734397afbea539400510cb30d3b996540b4a2506b + Root Key: b74854cb27cc25220ede4b08028967d1c6e297a759a6939dfef1ea72fbdd7b9a +``` + +Or we can have a look at the signed image from within the **DTR UI**. ![DTR](../../../images/sign-an-image-3.png){: .with-border} -DTR shows that the image is signed, but UCP won't trust the image -because it doesn't have any information about the private keys used to sign -the image. +### Adding Additional Delegations -### Delegate trust to your UCP keys +If you wanted to sign this image with multiple UCP Users, maybe if you had a use +case where an image needed to be signed by a member of the `Security` team and a +member of the `Developers` team. Then you can add multiple signers to a +repository. 
-To sign images in a way that is trusted by UCP, you need to delegate trust, so -that you can sign images with the private keys in your UCP client bundle. - -When delegating trust you associate a public key certificate with a role name. -UCP requires that you delegate trust to two different roles: - -* `targets/releases` -* `targets/`, where `` is the UCP team the user belongs to - -In this example we'll delegate trust to `targets/releases` and `targets/admin`: +To do so, first load a private key from a UCP User of the Security Team's in to +the local Docker Trust Store. ```bash -# Delegate trust, and add that public key with the role targets/releases -notary delegation add --publish \ - dtr.example.org/dev/nginx \ - targets/releases \ - --all-paths - -# Delegate trust, and add that public key with the role targets/admin -notary delegation add --publish \ - dtr.example.org/dev/nginx \ - targets/admin \ - --all-paths +$ docker trust key load --name security key.pem +Loading key from "key.pem"... +Enter passphrase for new security key with ID 5ac7d9a: +Repeat passphrase for new security key with ID 5ac7d9a: +Successfully imported key from key.pem ``` -To push the new signing metadata to the Notary server, you'll have to push -the image again: +Upload the Public Key to the Notary Server and Sign the Image. You will be asked +for both the Developers passphrase, as well as the Security Users passphrase to +sign the tag. -```none -docker push dtr.example.org/dev/nginx:1 +```bash +$ docker trust signer add --key cert.pem security dtr.example.com/prod/nginx +Adding signer "security" to dtr.example.com/prod/nginx... 
+Enter passphrase for repository key with ID e0d15a2: +Successfully added signer: security to dtr.example.com/prod/nginx + +$ docker trust sign dtr.example.com/prod/nginx:1 +Signing and pushing trust metadata for dtr.example.com/prod/nginx:1 +Existing signatures for tag 1 digest 5b49c8e2c890fbb0a35f6050ed3c5109c5bb47b9e774264f4f3aa85bb69e2033 from: +jeff +Enter passphrase for jeff key with ID 927f303: +Enter passphrase for security key with ID 5ac7d9a: +Successfully signed dtr.example.com/prod/nginx:1 ``` -## Under the hood +Finally, we can check the tag again to make sure it is now signed by 2 +signatures. -Both Docker and Notary CLI clients interact with the Notary server to: +```bash +$ docker trust inspect --pretty dtr.example.com/prod/nginx:1 -* Keep track of the metadata of signed images -* Validate the signatures of the images you pull +Signatures for dtr.example.com/prod/nginx:1 -This metadata is also kept locally in `~/.docker/trust`. +SIGNED TAG DIGEST SIGNERS +1 5b49c8e2c890fbb0a35f6050ed3c5109c5bb47b9e774264f4f3aa85bb69e2033 jeff, security -```none -. 
-|-- private -| |-- root_keys -| | `-- 993ad247476da081e45fdb6c28edc4462f0310a55da4acf1e08404c551d94c14.key -| `-- tuf_keys -| `-- dtr.example.org -| `-- dev -| `-- nginx -| |-- 98a93b2e52c594de4d13d7268a4a5f28ade5fc1cb5f44cc3a4ab118572a86848.key -| `-- f7917aef77d0d4bf8204af78c0716dac6649346ebea1c4cde7a1bfa363c502ce.key -`-- tuf - `-- dtr.example.org - `-- dev - `-- nginx - |-- changelist - `-- metadata - |-- root.json - |-- snapshot.json - |-- targets.json - `-- timestamp.json +List of signers and their keys for dtr.example.com/prod/nginx:1 + +SIGNER KEYS +jeff 927f30366699 +security 5ac7d9af7222 + +Administrative keys for dtr.example.com/prod/nginx:1 + + Repository Key: e0d15a24b741ab049470298734397afbea539400510cb30d3b996540b4a2506b + Root Key: b74854cb27cc25220ede4b08028967d1c6e297a759a6939dfef1ea72fbdd7b9a ``` -The `private` directory contains the private keys the Docker CLI client uses -to sign the images. Make sure you create backups of this directory so that -you don't lose your signing keys. +For more advanced use cases like this, see [Delegations for content trust](/engine/security/trust/trust_delegation/). -The Docker and Notary CLI clients integrate with Yubikey. If you have a Yubikey -plugged in when initializing trust for a repository, the root key is stored on -the Yubikey instead of in the trust directory. -When you run any command that needs the `root` key, Docker and Notary CLI -clients look on the Yubikey first, and use the trust directory as a fallback. +## Delete trust data -The `tuf` directory contains the trust metadata for the images you've -signed. For each repository there are four files. +If an Administrator wants to delete a DTR repository that contains Trust +metadata, they will be prompted to delete the trust metadata first before the +repository can be removed. 
-| File | Description | -|:-----------------|:--------------------------------------------------------------------------------------------------------------------------| -| `root.json` | Has data about other keys and their roles. This data is signed by the root key. | -| `targets.json` | Has data about the digest and size for an image. This data is signed by the target key. | -| `snapshot.json` | Has data about the version number of the root.json and targets.json files. This data is signed by the snapshot key. | -| `timestamp.json` | Has data about the digest, size, and version number for the snapshot.json file. This data is signed by the timestamp key. | +To delete trust metadata we need to use the Notary CLI. For information on how +to download and configure the Notary CLI head +[here](/engine/security/trust/trust_delegation/#configuring-the-notary-client) -[Learn more about trust metadata](/notary/service_architecture.md). + +```bash +$ notary delete dtr.example.com/prod/nginx --remote +Deleting trust data for repository dtr.example.com/prod/nginx +Enter username: admin +Enter password: +Successfully deleted local and remote trust data for repository dtr.example.com/prod/nginx +``` + +If you don't include the `--remote` flag, Notary deletes local cached content +but will not delete data from the Notary server. 
## Where to go next -* [Delegate image signing](delegate-image-signing.md) +* [Automating Docker Content + Trust](/engine/security/trust/trust_automation/) +* [Using Docker Content Trust with a Remote UCP](./trust-with-remote-ucp/) diff --git a/ee/dtr/user/manage-images/sign-images/manage-trusted-repositories.md b/ee/dtr/user/manage-images/sign-images/manage-trusted-repositories.md deleted file mode 100644 index 2cee2d83d0..0000000000 --- a/ee/dtr/user/manage-images/sign-images/manage-trusted-repositories.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: Manage trusted repositories -description: Learn how to use the Notary CLI client to manage trusted repositories -keywords: dtr, trust, notary, security ---- - -Once you -[configure the Notary CLI client](../../access-dtr/configure-your-notary-client.md), -you can use it to manage your private keys, list trust data from any repository -you have access to, authorize other team members to sign images, and rotate -keys if a private key has been compromised. - -## List trust data - -List the trust data for a repository by running: - -```bash -notary list // -``` - -You can get one of the following errors, or a list with the images that have -been signed: - -| Message | Description | -|:--------------------------------------------|:-----------------------------------------------------------------------------------------------------------------| -| `fatal: client is offline` | Either the repository server can't be reached, or your Notary CLI client is misconfigured | -| `fatal: does not have trust data` | There's no trust data for the repository. Either run `notary init` or sign and push an image to that repository. | -| `No targets present in this repository` | The repository has been initialized, but doesn't contain any signed images | - -## Initialize trust for a repository - -There's two ways to initialize trust data for a repository. 
You can either -sign and push an image to that repository: - -```bash -export DOCKER_CONTENT_TRUST=1 -docker push // -``` - -or - -``` -notary init // --publish -``` - -## Manage staged changes - -The Notary CLI client stages changes before publishing them to the server. -You can manage the changes that are staged by running: - -```bash -# Check what changes are staged -notary status // - -# Unstage a specific change -notary status // --unstage 0 - -# Alternatively, unstage all changes -notary status // --reset -``` - -When you're ready to publish your changes to the Notary server, run: - -```bash -notary publish // -``` - -## Delete trust data - -Administrator users can remove all signatures from a trusted repository by -running: - -```bash -notary delete // --remote -``` - -If you don't include the `--remote` flag, Notary deletes local cached content -but will not delete data from the Notary server. - - -## Change the passphrase for a key - -The Notary CLI client manages the keys used to sign the image metadata. To -list all the keys managed by the Notary CLI client, run: - -```bash -notary key list -``` - -To change the passphrase used to encrypt one of the keys, run: - -```bash -notary key passwd -``` - -## Rotate keys - -If one of the private keys is compromised you can rotate that key, so that -images that were signed with the key stop being trusted. - -For keys that are kept offline and managed by the Notary CLI client, such the -keys with the root, targets, and snapshot roles, you can rotate them with: - -```bash -notary key rotate // -``` - -The Notary CLI client generates a new key for the role you specified, and -prompts you for a passphrase to encrypt it. -Then you're prompted for the passphrase for the key you're rotating, and if it -is correct, the Notary CLI client contacts the Notary server to publish the -change. - -You can also rotate keys that are stored in the Notary server, such as the keys -with the snapshot or timestamp role. 
For that, run: - -```bash -notary key rotate // --server-managed -``` - -## Manage keys for delegation roles - -To delegate image signing to other UCP users, get the `cert.pem` file that's -included in their client bundle and run: - -```bash -notary delegation add \ - // targets/ user1.pem user2.pem \ - --all-paths --publish -``` - -You can also remove keys from a delegation role: - -```bash -# Remove the given keys from a delegation role -notary delegation remove \ - // targets/ \ - --publish - -# Alternatively, you can remove keys from all delegation roles -notary delegation purge // --key --key -``` - -## Troubleshooting - -Notary CLI has a `-D` flag that you can use to increase the logging level. You -can use this for troubleshooting. - -Usually most problems are fixed by ensuring you're communicating with the -correct Notary server, using the `-s` flag, and that you're using the correct -directory where your private keys are stored, with the `-d` flag. - -## Where to go next - -- [Learn more about Notary](/notary/advanced_usage.md) -- [Notary architecture](/notary/service_architecture.md) diff --git a/ee/dtr/user/manage-images/sign-images/trust-with-remote-ucp.md b/ee/dtr/user/manage-images/sign-images/trust-with-remote-ucp.md new file mode 100644 index 0000000000..011cdb3535 --- /dev/null +++ b/ee/dtr/user/manage-images/sign-images/trust-with-remote-ucp.md @@ -0,0 +1,258 @@ +--- +title: Using Docker Content Trust with a Remote UCP Cluster +description: Learn how to use a single DTR's trust data with remote UCPs. +keywords: registry, sign, trust, notary +redirect_from: +- /ee/ucp/admin/configure/integrate-with-multiple-registries/ +--- + +For more advanced deployments, you may want to share one Docker Trusted Registry +across multiple Universal Control Planes. 
However, customers wanting to adopt
+this model alongside the [Only Run Signed
+Images](/ee/ucp/admin/configure/run-only-the-images-you-trust.md) UCP feature run into problems as each UCP operates an independent set of users.
+
+Docker Content Trust (DCT) gets around this problem, since users from
+a remote UCP are able to sign images in the central DTR and still apply runtime
+enforcement.
+
+In the following example, we will connect DTR managed by UCP cluster 1 with a remote UCP cluster which we are calling UCP cluster 2, sign the
+image with a user from UCP cluster 2, and provide runtime enforcement
+within UCP cluster 2. This process could be repeated over and over,
+integrating DTR with multiple remote UCP clusters, signing the image with users
+from each environment, and then providing runtime enforcement in each remote UCP
+cluster separately.
+
+![](../../../images/remoteucp-graphic.png)
+
+> Before attempting this guide, familiarize yourself with [Docker Content
+> Trust](/engine/security/trust/content_trust/#signing-images-with-docker-content-trust)
+> and [Only Run Signed
+> Images](/ee/ucp/admin/configure/run-only-the-images-you-trust.md) on a
+> single UCP. Many of the concepts within this guide may be new without that
+> background.
+
+## Prerequisites
+
+- Cluster 1, running UCP 3.0.x or higher, with a DTR 2.5.x or higher deployed
+  within the cluster.
+- Cluster 2, running UCP 3.0.x or higher, with no DTR node.
+- Nodes on Cluster 2 need to trust the Certificate Authority which signed DTR's
+  TLS Certificate. This can be tested by logging on to a cluster 2 virtual
+  machine and running `curl https://dtr.example.com`.
+- The DTR TLS Certificate needs to be properly configured, ensuring that the
+  **Loadbalancer/Public Address** field has been configured, with this address
+  included [within the
+  certificate](/ee/dtr/admin/configure/use-your-own-tls-certificates/).
+- A machine with the [Docker Client](/ee/ucp/user-access/cli/) (CE 17.12 / + EE 1803 or newer) installed, as this contains the relevant `$ docker trust` + commands. + +## Registering DTR with a remote Universal Control Plane + +As there is no registry running within cluster 2, by default UCP will not know +where to check for trust data. Therefore, the first thing we need to do is +register DTR within the remote UCP in cluster 2. When you normally +install DTR, this registration process happens by default to +a local UCP, or cluster 1. + +> The registration process allows the remote UCP to get signature data from DTR, +> however this will not provide Single Sign On (SSO). Users on cluster 2 will not be +> synced with cluster 1's UCP or DTR. Therefore when pulling images, registry +> authentication will still need to be passed as part of the service definition +> if the repository is private. See +> [Kubernetes](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-secret-in-the-cluster-that-holds-your-authorization-token) +> or [Docker +> Swarm](/engine/swarm/services/#create-a-service-using-an-image-on-a-private-registry) examples. + +To add a new registry, retrieve the Certificate +Authority (CA) used to sign the DTR TLS Certificate through the DTR URL's +`/ca` endpoint. + +```bash +$ curl -ks https://dtr.example.com/ca > dtr.crt +``` + +Next, convert the DTR certificate into a JSON configuration file +for registration within the UCP for cluster 2. + +You can find a template of the `dtr-bundle.json` below. Replace the host address with your DTR URL, and enter the contents of the DTR CA certificate between the new line commands `\n and \n`. + +> ### JSON Formatting +> Ensure there are no line breaks between each line +> of the DTR CA certificate within the JSON file. Use your favorite JSON formatter for validation. 
+ +```bash +$ cat dtr-bundle.json +{ + "hostAddress": "dtr.example.com", + "caBundle": "-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----" +} +``` + +Now upload the configuration file to cluster 2's UCP +through the UCP API endpoint, `/api/config/trustedregistry_`. To authenticate +against the API of cluster 2's UCP, we have downloaded a [UCP client +bundle](/ee/ucp/user-access/cli/#download-client-certificates/), extracted it in +the current directory, and will reference the keys for authentication. + +```bash +$ curl --cacert ca.pem --cert cert.pem --key key.pem \ + -X POST \ + -H "Accept: application/json" \ + -H "Content-Type: application/json" \ + -d @dtr-bundle.json \ + https://cluster2.example.com/api/config/trustedregistry_ +``` + +Navigate to the UCP web interface to verify that the JSON file was imported successfully, as the UCP endpoint will not +output anything. Select **Admin > Admin Settings > Docker +Trusted Registry**. If the registry has been added successfully, you should see +the DTR listed. + +![](../../../images/remoteucp-addregistry.png){: .with-border} + + +Additionally, you can check the full [configuration +file](/ee/ucp/admin/configure/ucp-configuration-file/) within cluster 2's UCP. +Once downloaded, the `ucp-config.toml` file should now contain a section called +`[registries]` + +```bash +$ curl --cacert ca.pem --cert cert.pem --key key.pem https://cluster2.example.com/api/ucp/config-toml > ucp-config.toml +``` + +If the new registry isn't shown in the list, check the `ucp-controller` container logs on cluster 2. + +## Signing an image in DTR + +We will now sign an image and push this to DTR. To sign images we need a user's public private +key pair from cluster 2. It can be found in a client bundle, with +`key.pem` being a private key and `cert.pem` being the public key on an **X.509** +certificate. + +First, load the private key into the local Docker trust store +`(~/.docker/trust)`. 
The name used here is purely metadata to help keep track of
+which keys you have imported.
+
+```
+$ docker trust key load --name cluster2admin key.pem
+Loading key from "key.pem"...
+Enter passphrase for new cluster2admin key with ID a453196:
+Repeat passphrase for new cluster2admin key with ID a453196:
+Successfully imported key from key.pem
+```
+
+Next initiate the repository, and add the public key of cluster 2's user
+as a signer. You will be asked for a number of passphrases to protect the keys.
+Keep note of these passphrases, and see the [Docker Content Trust
+documentation](/engine/security/trust/trust_delegation/#managing-delegations-in-a-notary-server) to learn more about managing keys.
+
+
+```
+$ docker trust signer add --key cert.pem cluster2admin dtr.example.com/admin/trustdemo
+Adding signer "cluster2admin" to dtr.example.com/admin/trustdemo...
+Initializing signed repository for dtr.example.com/admin/trustdemo...
+Enter passphrase for root key with ID 4a72d81:
+Enter passphrase for new repository key with ID dd4460f:
+Repeat passphrase for new repository key with ID dd4460f:
+Successfully initialized "dtr.example.com/admin/trustdemo"
+Successfully added signer: cluster2admin to dtr.example.com/admin/trustdemo
+```
+
+Finally, sign the image tag. This pushes the image up to DTR, as well as
+signs the tag with the user from cluster 2's keys.
+
+```
+$ docker trust sign dtr.example.com/admin/trustdemo:1
+Signing and pushing trust data for local image dtr.example.com/admin/trustdemo:1, may overwrite remote trust data
+The push refers to repository [dtr.example.com/admin/trustdemo]
+27c0b07c1b33: Layer already exists
+aa84c03b5202: Layer already exists
+5f6acae4a5eb: Layer already exists
+df64d3292fd6: Layer already exists
+1: digest: sha256:37062e8984d3b8fde253eba1832bfb4367c51d9f05da8e581bd1296fc3fbf65f size: 1153
+Signing and pushing trust metadata
+Enter passphrase for cluster2admin key with ID a453196:
+Successfully signed dtr.example.com/admin/trustdemo:1
+```
+
+Within the DTR web interface, you should now be able to see your newly pushed tag with the **Signed** text next to the size.
+
+![](../../../images/remoteucp-signedimage.png){: .with-border}
+
+
+You could sign this image multiple times if required, whether it's multiple
+teams from the same cluster wanting to sign the image, or you integrating DTR with more remote UCPs so users from clusters 1,
+2, 3, or more can all sign the same image.
+
+## Enforce Signed Image Tags on the Remote UCP
+
+We can now enable **Only Run Signed Images** on the remote UCP. To do this,
+log in to cluster 2's UCP web interface as an admin. Select **Admin > Admin Settings > Docker Content
+Trust**.
+
+See [Run only the images you trust](/ee/ucp/admin/configure/run-only-the-images-you-trust/) for more information on only running signed images in UCP.
+
+
+![](../../../images/remoteucp-enablesigning.png){: .with-border}
+
+
+Finally, we can now deploy a workload on cluster 2, using a signed
+image from a DTR running on cluster 1. This workload could be a simple `$ docker
+run`, a Swarm Service, or a Kubernetes workload. As a simple test, source a
+client bundle, and try running one of your signed images.
+ +``` +$ source env.sh + +$ docker service create dtr.example.com/admin/trustdemo:1 +nqsph0n6lv9uzod4lapx0gwok +overall progress: 1 out of 1 tasks +1/1: running [==================================================>] +verify: Service converged + +$ docker service ls +ID NAME MODE REPLICAS IMAGE PORTS +nqsph0n6lv9u laughing_lamarr replicated 1/1 dtr.example.com/admin/trustdemo:1 +``` + +## Troubleshooting + +If the image is stored in a private repository within DTR, you need to pass credentials to the +Orchestrator as there is no SSO between cluster 2 and DTR. See the relevant +[Kubernetes](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-secret-in-the-cluster-that-holds-your-authorization-token) or [Docker Swarm](/engine/swarm/services/#create-a-service-using-an-image-on-a-private-registry) documentation for more details. + +### Example Errors + +#### Image or trust data does not exist +``` +image or trust data does not exist for dtr.example.com/admin/trustdemo:1 +``` + +This means something went wrong when initiating the repository or signing the +image, as the tag contains no signing data. + +#### Image did not meet required signing policy + +``` +Error response from daemon: image did not meet required signing policy + +dtr.example.com/admin/trustdemo:1: image did not meet required signing policy +``` + +This means that the image was signed correctly, however the user who signed the +image does not meet the signing policy in cluster 2. This could be because you +signed the image with the wrong user keys. + +#### DTR URL must be a registered trusted registry + +``` +Error response from daemon: dtr.example.com must be a registered trusted registry. See 'docker run --help'. +``` + +This means you have not registered DTR to work with a remote UCP instance yet, as outlined in [Registering DTR with a Remote Universal Control Plane](#registering-dtr-with-a-remote-universal-control-plane). 
+ +## Where to go next + +- [Learn more about Notary](/notary/advanced_usage.md) +- [Notary architecture](/notary/service_architecture.md) diff --git a/ee/dtr/user/promotion-policies/internal-promotion.md b/ee/dtr/user/promotion-policies/internal-promotion.md index d238e312da..34afec5d07 100644 --- a/ee/dtr/user/promotion-policies/internal-promotion.md +++ b/ee/dtr/user/promotion-policies/internal-promotion.md @@ -33,8 +33,8 @@ the repository page on the DTR web interface, and select the > Only administrators can globally create and edit promotion policies. By default > users can only create and edit promotion policies on repositories within their -> user namespace. For more information on user permissions head to -> [Authorization and Authorization](../../admin/manage-users/). +> user namespace. For more information on user permissions, see +> [Authentication and Authorization](/ee/dtr/admin/manage-users/). ![repository policies](../../images/internal-promotion-2.png){: .with-border} diff --git a/ee/dtr/user/promotion-policies/push-mirror.md b/ee/dtr/user/promotion-policies/push-mirror.md index 0465b0240f..b7c552a260 100644 --- a/ee/dtr/user/promotion-policies/push-mirror.md +++ b/ee/dtr/user/promotion-policies/push-mirror.md @@ -5,7 +5,7 @@ keywords: registry, promotion, mirror --- Docker Trusted Registry allows you to create mirroring policies for a repository. -When an image gets pushed to a repository and meets a certain criteria, +When an image gets pushed to a repository and meets the mirroring criteria, DTR automatically pushes it to a repository in a remote Docker Trusted or Hub registry. 
This not only allows you to mirror images but also allows you to create diff --git a/ee/supported-platforms.md b/ee/supported-platforms.md index 50bcfc1c2d..36722b1abe 100644 --- a/ee/supported-platforms.md +++ b/ee/supported-platforms.md @@ -1,9 +1,14 @@ --- title: About Docker Enterprise description: Information about Docker Enterprise 2.1 -keywords: Docker Enterprise, enterprise, enterprise edition, ee, docker ee, docker enterprise edition, lts, commercial, cs engine +keywords: Docker Enterprise, enterprise, enterprise edition, ee, docker ee, docker enterprise edition, lts, commercial, cs engine, commercially supported redirect_from: - /enterprise/supported-platforms/ + - /cs-engine/ + - /cs-engine/1.12/ + - /cs-engine/1.12/upgrade/ + - /cs-engine/1.13/ + - /cs-engine/1.13/upgrade/ green-check: '![yes](/install/images/green-check.svg){: style="height: 14px; margin:auto;"}' install-prefix-ee: '/install/linux/docker-ee' --- diff --git a/ee/ucp/admin/backups-and-disaster-recovery.md b/ee/ucp/admin/backups-and-disaster-recovery.md index c07aa4f8a0..f7242d35e6 100644 --- a/ee/ucp/admin/backups-and-disaster-recovery.md +++ b/ee/ucp/admin/backups-and-disaster-recovery.md @@ -27,10 +27,9 @@ UCP maintains data about: This data is persisted on the host running UCP, using named volumes. [Learn more about UCP named volumes](../ucp-architecture.md). -UCP won't backup your routing mesh settings. After restoring you need to -[re-enable the routing mesh](../interlock/deploy/index.md). If you've customized -your layer 7 routing deployment, you'll need to re-apply those customizations too. - +> Warning: UCP will not back up your routing mesh settings. After restoring, +[reenable the routing mesh](../interlock/deploy/index.md). Additionally, if you've customized +your Layer 7 routing deployment, reapply your custom settings. 
## Backup steps Back up your Docker EE components in the following order: diff --git a/ee/ucp/admin/configure/deploy-route-reflectors.md b/ee/ucp/admin/configure/deploy-route-reflectors.md index ca8f6090ed..b2541bd38f 100644 --- a/ee/ucp/admin/configure/deploy-route-reflectors.md +++ b/ee/ucp/admin/configure/deploy-route-reflectors.md @@ -29,8 +29,8 @@ and NodePorts might not work in these workloads. ## Choose dedicated nodes -Start by tainting the nodes, so that no other workload runs there. Configure -your CLI with a UCP client bundle, and for each dedicated node, run: +Start by tainting the nodes, so that no other workload runs there. [Configure +your CLI with a UCP client bundle](/ee/ucp/user-access/cli/), and for each dedicated node, run: ``` kubectl taint node \ @@ -127,45 +127,41 @@ kubectl create -f calico-rr.yaml ## Configure calicoctl To reconfigure Calico to use Route Reflectors instead of a node-to-node mesh, -you'll need to SSH into a UCP node and download the `calicoctl` tool. - -Log in to a UCP node using SSH, and run: +you'll need to tell `calicoctl` where to find the etcd key-value store managed +by UCP. From a CLI with a UCP client bundle, create a shell alias to start +`calicoctl` using the `{{ page.ucp_org }}/ucp-dsinfo` image: ``` -sudo curl --location https://github.com/projectcalico/calicoctl/releases/download/v3.1.1/calicoctl \ - --output /usr/bin/calicoctl -sudo chmod +x /usr/bin/calicoctl -``` - -Now you need to configure `calicoctl` to communicate with the etcd key-value -store managed by UCP. 
Create a file named `/etc/calico/calicoctl.cfg` with -the following content: - -``` -apiVersion: projectcalico.org/v3 -kind: CalicoAPIConfig -metadata: -spec: - datastoreType: "etcdv3" - etcdEndpoints: "127.0.0.1:12378" - etcdKeyFile: "/var/lib/docker/volumes/ucp-node-certs/_data/key.pem" - etcdCertFile: "/var/lib/docker/volumes/ucp-node-certs/_data/cert.pem" - etcdCACertFile: "/var/lib/docker/volumes/ucp-node-certs/_data/ca.pem" +UCP_VERSION=$(docker version --format {% raw %}'{{index (split .Server.Version "/") 1}}'{% endraw %}) +alias calicoctl="\ +docker run -i --rm \ + --pid host \ + --net host \ + -e constraint:ostype==linux \ + -e ETCD_ENDPOINTS=127.0.0.1:12378 \ + -e ETCD_KEY_FILE=/ucp-node-certs/key.pem \ + -e ETCD_CA_CERT_FILE=/ucp-node-certs/ca.pem \ + -e ETCD_CERT_FILE=/ucp-node-certs/cert.pem \ + -v /var/run/calico:/var/run/calico \ + -v ucp-node-certs:/ucp-node-certs:ro \ + {{ page.ucp_org }}/ucp-dsinfo:${UCP_VERSION} \ + calicoctl \ +" ``` ## Disable node-to-node BGP mesh -Not that you've configured `calicoctl`, you can check the current Calico BGP +Now that you've configured `calicoctl`, you can check the current Calico BGP configuration: ``` -sudo calicoctl get bgpconfig +calicoctl get bgpconfig ``` If you don't see any configuration listed, create one by running: ``` -cat << EOF | sudo calicoctl create -f - +calicoctl create -f - < bgp.yaml +calicoctl get bgpconfig --output yaml > bgp.yaml ``` Edit the `bgp.yaml` file, updating `nodeToNodeMeshEnabled` to `false`. Then update Calico configuration by running: ``` -sudo calicoctl replace -f bgp.yaml +calicoctl replace -f - < bgp.yaml ``` ## Configure Calico to use Route Reflectors @@ -198,14 +194,14 @@ To configure Calico to use the Route Reflectors you need to know the AS number for your network first. For that, run: ``` -sudo calicoctl get nodes --output=wide +calicoctl get nodes --output=wide ``` Now that you have the AS number, you can create the Calico configuration. 
For each Route Reflector, customize and run the following snippet: ``` -sudo calicoctl create -f - << EOF +calicoctl create -f - << EOF apiVersion: projectcalico.org/v3 kind: BGPPeer metadata: @@ -233,19 +229,34 @@ Using your UCP client bundle, run: ``` # Find the Pod name -kubectl get pods -n kube-system -o wide | grep +kubectl -n kube-system \ + get pods --selector k8s-app=calico-node -o wide | \ + grep # Delete the Pod -kubectl delete pod -n kube-system +kubectl -n kube-system delete pod ``` ## Validate peers -Now you can check that other `calico-node` pods running on other nodes are -peering with the Route Reflector: +Now you can check that `calico-node` pods running on other nodes are peering +with the Route Reflector. From a CLI with a UCP client bundle, use a Swarm affinity filter to run `calicoctl node +status` on any node running `calico-node`: ``` -sudo calicoctl node status +UCP_VERSION=$(docker version --format {% raw %}'{{index (split .Server.Version "/") 1}}'{% endraw %}) +docker run -i --rm \ + --pid host \ + --net host \ + -e affinity:container=='k8s_calico-node.*' \ + -e ETCD_ENDPOINTS=127.0.0.1:12378 \ + -e ETCD_KEY_FILE=/ucp-node-certs/key.pem \ + -e ETCD_CA_CERT_FILE=/ucp-node-certs/ca.pem \ + -e ETCD_CERT_FILE=/ucp-node-certs/cert.pem \ + -v /var/run/calico:/var/run/calico \ + -v ucp-node-certs:/ucp-node-certs:ro \ + {{ page.ucp_org }}/ucp-dsinfo:${UCP_VERSION} \ + calicoctl node status ``` You should see something like: diff --git a/ee/ucp/admin/configure/create-audit-logs.md b/ee/ucp/admin/configure/enable-audit-logging.md similarity index 98% rename from ee/ucp/admin/configure/create-audit-logs.md rename to ee/ucp/admin/configure/enable-audit-logging.md index 871d6a3194..811d25d459 100644 --- a/ee/ucp/admin/configure/create-audit-logs.md +++ b/ee/ucp/admin/configure/enable-audit-logging.md @@ -2,6 +2,7 @@ title: Enable audit logging on UCP description: Learn how to enable audit logging of all activity in UCP keywords: logs, ucp, swarm, 
kubernetes, audits +redirect_from: /ee/ucp/admin/configure/create-audit-logs/ --- Audit logs are a chronological record of security-relevant activities by @@ -60,7 +61,7 @@ generate chargeback information. created by the event, alerting features can be built on top of event tools that generate alerts for ops teams (PagerDuty, OpsGenie, Slack, or custom solutions). -## Enablig UCP Audit Logging +## Enabling UCP Audit Logging UCP audit logging can be enabled via the UCP web user interface, the UCP API or via the UCP configuration file. diff --git a/ee/ucp/admin/configure/external-auth/index.md b/ee/ucp/admin/configure/external-auth/index.md index fd6f2e852e..821640870b 100644 --- a/ee/ucp/admin/configure/external-auth/index.md +++ b/ee/ucp/admin/configure/external-auth/index.md @@ -30,7 +30,7 @@ the *distinguished name* of the node in the LDAP directory tree where the search starts looking for users. Access LDAP settings by navigating to the **Authentication & Authorization** -page in the UCP web UI. There are two sections for controlling LDAP searches +page in the UCP web interface. There are two sections for controlling LDAP searches and servers. - **LDAP user search configurations:** This is the section of the @@ -105,7 +105,7 @@ email address, for example, `jane.doe@subsidiary1.com`. ## Configure the LDAP integration To configure UCP to create and authenticate users by using an LDAP directory, -go to the UCP web UI, navigate to the **Admin Settings** page and click +go to the UCP web interface, navigate to the **Admin Settings** page and click **Authentication & Authorization** to select the method used to create and authenticate users. @@ -220,6 +220,15 @@ UCP enables syncing teams with a search query or group in your organization's LDAP directory. [Sync team members with your organization's LDAP directory](../../../authorization/create-teams-with-ldap.md). 
+## LDAP Configuration via API + +As of UCP 3.1.5, LDAP-specific `GET` and `PUT` API endpoints have been added to the Config resource. Note that swarm mode has to be enabled before you can hit the following endpoints: + +- `GET /api/ucp/config/auth/ldap` - Returns information on your current system LDAP configuration. +- `PUT /api/ucp/config/auth/ldap` - Lets you update your LDAP configuration. + +See [UCP API Documentation](/reference/ucp/3.1/api/) for additonal information. + ## Where to go next - [Create users and teams manually](../../../authorization/create-users-and-teams-manually.md) diff --git a/ee/ucp/admin/configure/integrate-saml.md b/ee/ucp/admin/configure/integrate-saml.md new file mode 100644 index 0000000000..112deb9c26 --- /dev/null +++ b/ee/ucp/admin/configure/integrate-saml.md @@ -0,0 +1,93 @@ +--- +title: SAML integration +description: Learn how to use SAML to link a UCP team with an Identity Provider (IdP) Group +keywords: cluster, node, join +--- + +# SAML integration + +## Typical steps involved in SAML integration: +1. Configure IdP. +2. Enable SAML and configure UCP as the Service Provider in **Admin Settings** -> **Authentication and Authorization**. +3. Create (Edit) Teams to link with the Group memberships. This updates team membership information when a user signs in with SAML. + +### Configure IdP: +Service Provider metadata is available at `https:///enzi/v0/saml/metadata` +after SAML is enabled. The metadata link is also labeled as `entityID`. + +> **Note**: Only `POST` binding is supported for the 'Assertion Consumer Service', which is located +at `https:///enzi/v0/saml/acs`. + +### Enable SAML and configure UCP +After UCP sends an `AuthnRequest` to the IdP, the following `Assertion` +is expected: + +- `Subject` includes a `NameID` that is identified as the UCP username. +In `AuthnRequest`, `NameIDFormat` is set to `urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified`. 
+This allows maximum compatibility for various Identity Providers. + +```xml + + ry4nz + + + + +``` + +- Optional `Attribute` named `fullname` is mapped to the **Full Name** field +in the UCP account. + +> **Note**: UCP uses the value of the first occurrence of an `Attribute` with `Name="fullname"` as the **Full Name**. + +```xml + + user.displayName + + +``` + +- Optional `Attribute` named `member-of` is linked to the UCP team. +Values are set in the UCP interface. + +> **Note**: UCP uses all `AttributeStatements` and `Attributes` in the `Assertion` with `Name="member-of"`. + +```xml + + groupName + + +``` + +- Optional `Attribute` named `is-admin` determines if the user is an administrator. The content in the `AttributeValue` is ignored. + +```xml + + value_doe_not_matter + + +``` + +#### Okta Configuration +Configuring with Okta is straightforward, as shown in the following examples: +![Configure in Okta](../../images/saml_okta_2.png) +![Configure in UCP](../../images/saml_okta_1.png) + +When two or more group names are expected to return with the Assertion, use the `regex` filter. For example, use the value `apple|orange` +to return groups `apple` and `orange`. +![Regex_filter_for_group_Okta](../../images/saml_okta_3.png) + +### Service Provider Configuration +Enter the Identity Provider's metadata URL to obtain its metadata. To access the URL, you might need to +provide the CA certificate that can verify the remote server. + +### Link Group memeberships with users +Use the 'edit' or 'create' team dialog to associate SAML group assertion with +the UCP team so that user team membership is synchronized when the user logs in. 
+![Link UCP team with IdP group](../../images/saml_okta_4.png) diff --git a/ee/ucp/admin/configure/integrate-with-multiple-registries.md b/ee/ucp/admin/configure/integrate-with-multiple-registries.md deleted file mode 100644 index fdf19a4281..0000000000 --- a/ee/ucp/admin/configure/integrate-with-multiple-registries.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Integrate with multiple registries -description: Integrate UCP with multiple registries -keywords: trust, registry, integrate, UCP, DTR -redirect_from: - - /datacenter/ucp/3.0/guides/admin/configure/integrate-with-multiple-registries/ ---- - -Universal Control Plane can pull and run images from any image registry, -including Docker Trusted Registry and Docker Hub. - -If your registry uses globally-trusted TLS certificates, everything works -out of the box, and you don't need to configure anything. But if your registries -use self-signed certificates or certificates issues by your own Certificate -Authority, you need to configure UCP to trust those registries. - -## Trust Docker Trusted Registry - -To configure UCP to trust a DTR deployment, you need to update the -[UCP system configuration](ucp-configuration-file.md) to include one entry for -each DTR deployment: - -``` -[[registries]] - host_address = "dtr.example.org" - ca_bundle = """ ------BEGIN CERTIFICATE----- -... ------END CERTIFICATE-----""" - -[[registries]] - host_address = "internal-dtr.example.org:444" - ca_bundle = """ ------BEGIN CERTIFICATE----- -... ------END CERTIFICATE-----""" -``` - -You only need to include the port section if your DTR deployment is running -on a port other than 443. - -You can customize and use the script below to generate a file named -`trust-dtr.toml` with the configuration needed for your DTR deployment. 
- -``` -# Replace this url by your DTR deployment url and port -DTR_URL=https://dtr.example.org -DTR_PORT=443 - -dtr_full_url=${DTR_URL}:${DTR_PORT} -dtr_ca_url=${dtr_full_url}/ca - -# Strip protocol and default https port -dtr_host_address=${dtr_full_url#"https://"} -dtr_host_address=${dtr_host_address%":443"} - -# Create the registry configuration and save it -cat < trust-dtr.toml - -[[registries]] - # host address should not contain protocol or port if using 443 - host_address = $dtr_host_address - ca_bundle = """ -$(curl -sk $dtr_ca_url)""" -EOL -``` - -You can then append the content of `trust-dtr.toml` to your current UCP -configuration to make UCP trust this DTR deployment. - -## Where to go next - -- [Integrate with LDAP by using a configuration file](external-auth/enable-ldap-config-file.md) diff --git a/ee/ucp/admin/configure/join-nodes/join-windows-nodes-to-cluster.md b/ee/ucp/admin/configure/join-nodes/join-windows-nodes-to-cluster.md index e0b8e911fc..67aa860b79 100644 --- a/ee/ucp/admin/configure/join-nodes/join-windows-nodes-to-cluster.md +++ b/ee/ucp/admin/configure/join-nodes/join-windows-nodes-to-cluster.md @@ -16,6 +16,8 @@ Follow these steps to enable a worker node on Windows. 2. Configure the Windows node. 3. Join the Windows node to the cluster. +**Note**: Refer to the [Docker compatibility matrix](https://success.docker.com/article/compatibility-matrix) for complete Docker compatibility information with Windows Server. + ## Install Docker Engine - Enterprise on Windows Server [Install Docker Engine - Enterprise](/engine/installation/windows/docker-ee/#use-a-script-to-install-docker-ee) @@ -26,30 +28,13 @@ Docker Enterprise 2.1. Follow these steps to configure the docker daemon and the Windows environment. -1. Add a label to the node. -2. Pull the Windows-specific image of `ucp-agent`, which is named `ucp-agent-win`. -3. Run the Windows worker setup script provided with `ucp-agent-win`. -4. 
Join the cluster with the token provided by the Docker UCP web interface or CLI. +1. Pull the Windows-specific image of `ucp-agent`, which is named `ucp-agent-win`. +2. Run the Windows worker setup script provided with `ucp-agent-win`. +3. Join the cluster with the token provided by the Docker UCP web interface or CLI. ### Add a label to the node -Configure the Docker Engine running on the node to have a label. This makes -it easier to deploy applications on nodes with this label. - -Create the file `C:\ProgramData\docker\config\daemon.json` with the following -content: - -``` -{ - "labels": ["os=windows"] -} -``` - -Restart Docker for the changes to take effect: - -``` -Restart-Service docker -``` +As of Docker Enterprise 2.1, which includes UCP 3.1, this step is no longer necessary. Windows nodes are automatically assigned the `ostype` label `ostype=windows`. ### Pull the Windows-specific images @@ -226,3 +211,7 @@ Some features are not yet supported on Windows nodes: * Mounts * On Windows, Docker can't listen on a Unix socket. Use TCP or a named pipe instead. + +## Known Issues + +Refer to the [Docker EE UCP release notes](/ee/ucp/release-notes) for Known Issues information. diff --git a/ee/ucp/admin/configure/ucp-configuration-file.md b/ee/ucp/admin/configure/ucp-configuration-file.md index 78ce30c8a5..61e97e5e21 100644 --- a/ee/ucp/admin/configure/ucp-configuration-file.md +++ b/ee/ucp/admin/configure/ucp-configuration-file.md @@ -31,16 +31,22 @@ Specify your configuration settings in a TOML file. Use the `config-toml` API to export the current settings and write them to a file. 
Within the directory of a UCP admin user's [client certificate bundle](../../user-access/cli.md), the following command exports the current configuration for the UCP hostname `UCP_HOST` to a file named `ucp-config.toml`: -```bash -curl --cacert ca.pem --cert cert.pem --key key.pem https://UCP_HOST/api/ucp/config-toml > ucp-config.toml +### Get an authtoken + +``` +AUTHTOKEN=$(curl --silent --insecure --data '{"username":"","password":""}' https://UCP_HOST/auth/login | jq --raw-output .auth_token) ``` -Edit `ucp-config.toml`, then use the following `curl` command to import it back into -UCP and apply your configuration changes: +### Download config file +``` +curl -X GET "https://UCP_HOST/api/ucp/config-toml" -H "accept: application/toml" -H "Authorization: Bearer $AUTHTOKEN" > ucp-config.toml +``` -```bash -curl --cacert ca.pem --cert cert.pem --key key.pem --upload-file ucp-config.toml https://UCP_HOST/api/ucp/config-toml +### Upload config file + +``` +curl -X PUT -H "accept: application/toml" -H "Authorization: Bearer $AUTHTOKEN" --upload-file 'path/to/ucp-config.toml' https://UCP_HOST/api/ucp/config-toml ``` ## Apply an existing configuration file at install time @@ -141,6 +147,8 @@ Specifies whether DTR images require signing. ### log_configuration table (optional) +> Note: This feature has been deprecated. Refer to the [Deprecation notice](https://docs.docker.com/ee/ucp/release-notes/#deprecation-notice) for additional information. + Configures the logging options for UCP components. | Parameter | Required | Description | @@ -189,6 +197,7 @@ components. Assigning these values overrides the settings in a container's | `calico_mtu` | no | Set the MTU (maximum transmission unit) size for the Calico plugin. | | `ipip_mtu` | no | Set the IPIP MTU size for the calico IPIP tunnel interface. | | `azure_ip_count` | no | Set the IP count for azure allocator to allocate IPs per Azure virtual machine. 
+| `service-cluster-ip-range` | yes | Sets the subnet pool from which IPs for Services should be allocated. Default is `10.96.0.0/16`. |
- -## Create the NFS Server - -To enable NFS volume provisioning on a UCP cluster, you need to install -an NFS server. Google provides an image for this purpose. - -On any node in the cluster with a [UCP client bundle](../../user-access/cli.md), -copy the following yaml to a file named nfs-server.yaml. - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: nfs-server - namespace: default - labels: - role: nfs-server -spec: - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - nodeSelector: - node-role.kubernetes.io/master: "" - containers: - - name: nfs-server - image: gcr.io/google_containers/volume-nfs:0.8 - securityContext: - privileged: true - ports: - - name: nfs-0 - containerPort: 2049 - protocol: TCP - restartPolicy: Always -``` - -Run the following command to create the NFS server pod. - -```bash -kubectl create -f nfs-server.yaml -``` - -The default storage class needs the IP address of the NFS server pod. -Run the following command to get the pod's IP address. - -```bash -kubectl describe pod nfs-server | grep IP: -``` - -The result looks like this: - -``` -IP: 192.168.106.67 -``` - -## Create the default storage class - -To enable NFS provisioning, create a storage class that has the -`storageclass.kubernetes.io/is-default-class` annotation set to `true`. -Also, provide the IP address of the NFS server pod as a parameter. - -Copy the following yaml to a file named default-storage.yaml. Replace -`` with the IP address from the previous step. - -```yaml -kind: StorageClass -apiVersion: storage.k8s.io/v1beta1 -metadata: - namespace: default - name: default-storage - annotations: - storageclass.kubernetes.io/is-default-class: "true" - labels: - kubernetes.io/cluster-service: "true" -provisioner: kubernetes.io/nfs -parameters: - path: / - server: -``` - -Run the following command to create the default storage class. 
- -```bash -kubectl create -f default-storage.yaml -``` - -Confirm that the storage class was created and that it's assigned as the -default for the cluster. - -```bash -kubectl get storageclass -``` - -It should look like this: - -``` -NAME PROVISIONER AGE -default-storage (default) kubernetes.io/nfs 58s -``` - -## Create persistent volumes - -Create two persistent volumes based on the `default-storage` storage class. -One volume is for the MySQL database, and the other is for WordPress. - -To create an NFS volume, specify `storageClassName: default-storage` in the -persistent volume spec. - -Copy the following yaml to a file named local-volumes.yaml. - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - name: local-pv-1 - labels: - type: local -spec: - storageClassName: default-storage - capacity: - storage: 20Gi - accessModes: - - ReadWriteOnce - hostPath: - path: /tmp/data/pv-1 ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: local-pv-2 - labels: - type: local -spec: - storageClassName: default-storage - capacity: - storage: 20Gi - accessModes: - - ReadWriteOnce - hostPath: - path: /tmp/data/pv-2 -``` - -Run this command to create the persistent volumes. - -```bash -kubectl create -f local-volumes.yaml -``` - -Inspect the volumes: - -```bash -kubectl get persistentvolumes -``` - -They should look like this: - -``` -NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE -local-pv-1 20Gi RWO Retain Available default-storage 1m -local-pv-2 20Gi RWO Retain Available default-storage 1m -``` - -## Create a secret for the MySQL password - -Create a secret for the password that you want to use for accessing the MySQL -database. Use this command to create the secret object: - -```bash -kubectl create secret generic mysql-pass --from-literal=password= -``` - -## Deploy persistent volume claims and applications - -You have two persistent volumes that are available for claims. 
The MySQL -deployment uses one volume, and WordPress uses the other. - -Copy the following yaml to a file named `wordpress-deployment.yaml`. -The claims in this file make no reference to a particular storage class, so -they bind to any available volumes that can satisfy the storage request. -In this example, both claims request `20Gi` of storage. - -> Use specific persistent volume -> ->If you are attempting to use a specific persistent volume and not let Kubernetes choose at random, ensure that the `storageClassName` key is populated in the persistent claim itself. -{: important} - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: wordpress-mysql - labels: - app: wordpress -spec: - ports: - - port: 3306 - selector: - app: wordpress - tier: mysql - clusterIP: None ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: mysql-pv-claim - labels: - app: wordpress -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi ---- -apiVersion: apps/v1beta2 -kind: Deployment -metadata: - name: wordpress-mysql - labels: - app: wordpress -spec: - selector: - matchLabels: - app: wordpress - tier: mysql - strategy: - type: Recreate - template: - metadata: - labels: - app: wordpress - tier: mysql - spec: - containers: - - image: mysql:5.6 - name: mysql - env: - - name: MYSQL_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: mysql-pass - key: password - ports: - - containerPort: 3306 - name: mysql - volumeMounts: - - name: mysql-persistent-storage - mountPath: /var/lib/mysql - volumes: - - name: mysql-persistent-storage - persistentVolumeClaim: - claimName: mysql-pv-claim ---- -apiVersion: v1 -kind: Service -metadata: - name: wordpress - labels: - app: wordpress -spec: - ports: - - port: 80 - selector: - app: wordpress - tier: frontend - type: LoadBalancer ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: wp-pv-claim - labels: - app: wordpress -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - 
storage: 20Gi ---- -apiVersion: apps/v1beta2 -kind: Deployment -metadata: - name: wordpress - labels: - app: wordpress -spec: - selector: - matchLabels: - app: wordpress - tier: frontend - strategy: - type: Recreate - template: - metadata: - labels: - app: wordpress - tier: frontend - spec: - containers: - - image: wordpress:4.8-apache - name: wordpress - env: - - name: WORDPRESS_DB_HOST - value: wordpress-mysql - - name: WORDPRESS_DB_PASSWORD - valueFrom: - secretKeyRef: - name: mysql-pass - key: password - ports: - - containerPort: 80 - name: wordpress - volumeMounts: - - name: wordpress-persistent-storage - mountPath: /var/www/html - volumes: - - name: wordpress-persistent-storage - persistentVolumeClaim: - claimName: wp-pv-claim -``` - -Run the following command to deploy the MySQL and WordPress images. - -```bash -kubectl create -f wordpress-deployment.yaml -``` - -Confirm that the pods are up and running. - -```bash -kubectl get pods -``` - -You should see something like this: - -``` -NAME READY STATUS RESTARTS AGE -nfs-server 1/1 Running 0 2h -wordpress-f4dcfdf45-4rkgs 1/1 Running 0 1m -wordpress-mysql-7bdd6d857c-fvgqx 1/1 Running 0 1m -``` - -It may take a few minutes for both pods to enter the `Running` state. - -## Inspect the deployment - -The WordPress deployment is ready to go. You can see it in action by opening -a web browser on the URL of the WordPress service. The easiest way to get the -URL is to open the UCP web UI, navigate to the Kubernetes **Load Balancers** -page, and click the **wordpress** service. In the details pane, the URL is -listed in the **Ports** section. - -![](../../images/use-nfs-volume-1.png){: .with-border} - -Also, you can get the URL by using the command line. - -On any node in the cluster, run the following command to get the IP addresses -that are assigned to the current node. 
- -```bash -{% raw %} -docker node inspect --format '{{ index .Spec.Labels "com.docker.ucp.SANs" }}' -{% endraw %} -``` - -You should see a list of IP addresses, like this: - -``` -172.31.36.167,jg-latest-ubuntu-0,127.0.0.1,172.17.0.1,54.213.225.17 -``` - -One of these corresponds with the external node IP address. Look for an address -that's not in the `192.*`, `127.*`, and `172.*` ranges. In the current example, -the IP address is `54.213.225.17`. - -The WordPress web UI is served through a `NodePort`, which you get with this -command: - -```bash -kubectl describe svc wordpress | grep NodePort -``` - -Which returns something like this: - -``` -NodePort: 34746/TCP -``` - -Put the two together to get the URL for the WordPress service: -`http://:`. - -For this example, the URL is `http://54.213.225.17:34746`. - -![](../../images/use-nfs-volume-2.png){: .with-border} - -## Write a blog post to use the storage - -Open the URL for the WordPress service and follow the instructions for -installing WordPress. In this example, the blog is named "NFS Volumes". - -![](../../images/use-nfs-volume-3.png){: .with-border} - -Create a new blog post and publish it. - -![](../../images/use-nfs-volume-4.png){: .with-border} - -Click the **permalink** to view the site. 
- -![](../../images/use-nfs-volume-5.png){: .with-border} - -## Where to go next - -- [Example of NFS based persistent volume](https://github.com/kubernetes/examples/tree/master/staging/volumes/nfs#nfs-server-part) -- [Example: Deploying WordPress and MySQL with Persistent Volumes](https://v1-8.docs.kubernetes.io/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/) diff --git a/ee/ucp/admin/configure/use-trusted-images-for-ci.md b/ee/ucp/admin/configure/use-trusted-images-for-ci.md deleted file mode 100644 index 601a27fe07..0000000000 --- a/ee/ucp/admin/configure/use-trusted-images-for-ci.md +++ /dev/null @@ -1,149 +0,0 @@ ---- -title: Use trusted images for continuous integration -description: Set up and configure content trust and signing policy for use with a continuous integration system -keywords: cup, trust, notary, security, continuous integration ---- - -The document provides a minimal example on setting up Docker Content Trust (DCT) in -Universal Control Plane (UCP) for use with a Continuous Integration (CI) system. It -covers setting up the necessary accounts and trust delegations to restrict only those -images built by your CI system to be deployed to your UCP managed cluster. - -## Set up UCP accounts and teams - -The first step is to create a user account for your CI system. For the purposes of -this document we will assume you are using Jenkins as your CI system and will therefore -name the account "jenkins". As an admin user logged in to UCP, navigate to "User Management" -and select "Add User". Create a user with the name "jenkins" and set a strong password. - -Next, create a team called "CI" and add the "jenkins" user to this team. All signing -policy is team based, so if we want only a single user to be able to sign images -destined to be deployed on the cluster, we must create a team for this one user. 
- -## Set up the signing policy - -While still logged in as an admin, navigate to "Admin Settings" and select the "Content Trust" -subsection. Select the checkbox to enable content trust and in the select box that appears, -select the "CI" team we have just created. Save the settings. - -This policy will require that every image that referenced in a `docker image pull`, -`docker container run`, or `docker service create` must be signed by a key corresponding -to a member of the "CI" team. In this case, the only member is the "jenkins" user. - -## Create keys for the Jenkins user - -The signing policy implementation uses the certificates issued in user client bundles -to connect a signature to a user. Using an incognito browser window (or otherwise), -log in to the "jenkins" user account you created earlier. Download a client bundle for -this user. It is also recommended to change the description associated with the public -key stored in UCP such that you can identify in the future which key is being used for -signing. - -Each time a user retrieves a new client bundle, a new keypair is generated. It is therefore -necessary to keep track of a specific bundle that a user chooses to designate as their signing bundle. - -Once you have decompressed the client bundle, the only two files you need for the purposes -of signing are `cert.pem` and `key.pem`. These represent the public and private parts of -the user's signing identity respectively. We will load the `key.pem` file onto the Jenkins -servers, and use `cert.pem` to create delegations for the "jenkins" user in our -Trusted Collection. - -## Prepare the Jenkins server - -### Load `key.pem` on Jenkins - -You will need to use the notary client to load keys onto your Jenkins server. Simply run -`notary -d /path/to/.docker/trust key import /path/to/key.pem`. You will be asked to set -a password to encrypt the key on disk. 
For automated signing, this password can be configured -into the environment under the variable name `DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE`. The `-d` -flag to the command specifies the path to the `trust` subdirectory within the server's `docker` -configuration directory. Typically this is found at `~/.docker/trust`. - -### Enable content trust - -There are two ways to enable content trust: globally, and per operation. To enabled content -trust globally, set the environment variable `DOCKER_CONTENT_TRUST=1`. To enable on a per -operation basis, wherever you run `docker image push` in your Jenkins scripts, add the flag -`--disable-content-trust=false`. You may wish to use this second option if you only want -to sign some images. - -The Jenkins server is now prepared to sign images, but we need to create delegations referencing -the key to give it the necessary permissions. - -## Initialize a repository - -Any commands displayed in this section should _not_ be run from the Jenkins server. You -will most likely want to run them from your local system. - -If this is a new repository, create it in Docker Trusted Registry (DTR) or Docker Hub, -depending on which you use to store your images, before proceeding further. - -We will now initialize the trust data and create the delegation that provides the Jenkins -key with permissions to sign content. The following commands initialize the trust data and -rotate snapshotting responsibilities to the server. This is necessary to ensure human involvement -is not required to publish new content. - -``` -notary -s https://my_notary_server.com -d ~/.docker/trust init my_repository -notary -s https://my_notary_server.com -d ~/.docker/trust key rotate my_repository snapshot -r -notary -s https://my_notary_server.com -d ~/.docker/trust publish my_repository -``` - -The `-s` flag specifies the server hosting a notary service. If you are operating against -Docker Hub, this will be `https://notary.docker.io`. 
If you are operating against your own DTR -instance, this will be the same hostname you use in image names when running docker commands preceded -by the `https://` scheme. For example, if you would run `docker image push my_dtr:4443/me/an_image` the value -of the `-s` flag would be expected to be `https://my_dtr:4443`. - -If you are using DTR, the name of the repository should be identical to the full name you use -in a `docker image push` command. If however you use Docker Hub, the name you use in a `docker image push` -must be preceded by `docker.io/`. i.e. if you ran `docker image push me/alpine`, you would -`notary init docker.io/me/alpine`. - -For brevity, we will exclude the `-s` and `-d` flags from subsequent command, but be aware you -will still need to provide them for the commands to work correctly. - -Now that the repository is initialized, we need to create the delegations for Jenkins. Docker -Content Trust treats a delegation role called `targets/releases` specially. It considers this -delegation to contain the canonical list of published images for the repository. It is therefore -generally desirable to add all users to this delegation with the following command: - -``` -notary delegation add my_repository targets/releases --all-paths /path/to/cert.pem -``` - -This solves a number of prioritization problems that would result from needing to determine -which delegation should ultimately be trusted for a specific image. However, because it -is anticipated that any user will be able to sign the `targets/releases` role it is not trusted -in determining if a signing policy has been met. 
Therefore it is also necessary to create a -delegation specifically for Jenkins: - -``` -notary delegation add my_repository targets/jenkins --all-paths /path/to/cert.pem -``` - -We will then publish both these updates (remember to add the correct `-s` and `-d` flags): - -``` -notary publish my_repository -``` - -Informational (Advanced): If we included the `targets/releases` role in determining if a signing policy -had been met, we would run into the situation of images being opportunistically deployed when -an appropriate user signs. In the scenario we have described so far, only images signed by -the "CI" team (containing only the "jenkins" user) should be deployable. If a user "Moby" could -also sign images but was not part of the "CI" team, they might sign and publish a new `targets/releases` -that contained their image. UCP would refuse to deploy this image because it was not signed -by the "CI" team. However, the next time Jenkins published an image, it would update and sign -the `targets/releases` role as whole, enabling "Moby" to deploy their image. - -## Conclusion - -With the Trusted Collection initialized, and delegations created, the Jenkins server will -now use the key we imported to sign any images we push to this repository. - -Through either the Docker CLI, or the UCP browser interface, we will find that any images -that do not meet our signing policy cannot be used. The signing policy we set up requires -that the "CI" team must have signed any image we attempt to `docker image pull`, `docker container run`, -or `docker service create`, and the only member of that team is the "jenkins" user. This -restricts us to only running images that were published by our Jenkins CI system. 
diff --git a/ee/ucp/admin/install/index.md b/ee/ucp/admin/install/index.md index e754fea706..2f9340829b 100644 --- a/ee/ucp/admin/install/index.md +++ b/ee/ucp/admin/install/index.md @@ -2,8 +2,6 @@ title: Install UCP for production description: Learn how to install Docker Universal Control Plane on production. keywords: Universal Control Plane, UCP, install, Docker EE -redirect_from: - - /datacenter/ucp/3.0/guides/admin/install/ --- Docker Universal Control Plane (UCP) is a containerized application that you diff --git a/ee/ucp/admin/install/install-on-azure.md b/ee/ucp/admin/install/install-on-azure.md index 809965a77c..4c1d4f122c 100644 --- a/ee/ucp/admin/install/install-on-azure.md +++ b/ee/ucp/admin/install/install-on-azure.md @@ -4,59 +4,54 @@ description: Learn how to install Docker Universal Control Plane in a Microsoft keywords: Universal Control Plane, UCP, install, Docker EE, Azure, Kubernetes --- -Docker UCP closely integrates into Microsoft Azure for its Kubernetes Networking -and Persistent Storage feature set. UCP deploys the Calico CNI provider. In Azure +Docker Universal Control Plane (UCP) closely integrates with Microsoft Azure for its Kubernetes Networking +and Persistent Storage feature set. UCP deploys the Calico CNI provider. In Azure, the Calico CNI leverages the Azure networking infrastructure for data path networking and the Azure IPAM for IP address management. There are -infrastructure prerequisites that are required prior to UCP installation for the +infrastructure prerequisites required prior to UCP installation for the Calico / Azure integration. ## Docker UCP Networking -Docker UCP configures the Azure IPAM module for Kubernetes to allocate -IP addresses to Kubernetes pods. The Azure IPAM module requires each Azure -virtual machine that's part of the Kubernetes cluster to be configured with a pool of -IP addresses. +Docker UCP configures the Azure IPAM module for Kubernetes to allocate IP +addresses for Kubernetes pods. 
The Azure IPAM module requires each Azure virtual +machine which is part of the Kubernetes cluster to be configured with a pool of IP +addresses. -There are two options for provisoning IPs for the Kubernetes cluster on Azure -- Docker UCP provides an automated mechanism to configure and maintain IP pools - for standalone Azure virtual machines. This service runs within the calico-node daemonset - and by default will provision 128 IP address for each node. This value can be - configured through the `azure_ip_count`in the UCP - [configuration file](../configure/ucp-configuration-file) before or after the - UCP installation. Note that if this value is reduced post-installation, existing - virtual machines will not be reconciled, and you will have to manually edit the IP count - in Azure. -- Manually provision additional IP address for each Azure virtual machine. This could be done - as part of an Azure Virtual Machine Scale Set through an ARM template. You can find an example [here](#set-up-ip-configurations-on-an-azure-virtual-machine-scale-set). - Note that the `azure_ip_count` value in the UCP - [configuration file](../configure/ucp-configuration-file) will need to be set - to 0, otherwise UCP's IP Allocator service will provision the IP Address on top of - those you have already provisioned. +There are two options for provisoning IPs for the Kubernetes cluster on Azure: + +- _An automated mechanism provided by UCP which allows for IP pool configuration and maintenance + for standalone Azure virtual machines._ This service runs within the + `calico-node` daemonset and provisions 128 IP addresses for each + node by default. For information on customizing this value, see [Adjusting the IP count value](#adjusting-the-ip-count-value). +- _Manual provision of additional IP address for each Azure virtual machine._ This + could be done through the Azure Portal, the Azure CLI `$ az network nic ip-config create`, + or an ARM template. 
You can find an example of an ARM template + [here](#manually-provision-ip-address-as-part-of-an-azure-virtual-machine-scale-set). ## Azure Prerequisites -You must meet these infrastructure prerequisites in order -to successfully deploy Docker UCP on Azure +You must meet the following infrastructure prerequisites in order +to successfully deploy Docker UCP on Azure: -- All UCP Nodes (Managers and Workers) need to be deployed into the same -Azure Resource Group. The Azure Networking (Vnets, Subnets, Security Groups) -components could be deployed in a second Azure Resource Group. -- All UCP Nodes (Managers and Workers) need to be attached to the same -Azure Subnet. -- All UCP (Managers and Workers) need to be tagged in Azure with the -`Orchestrator` tag. Note the value for this tag is the Kubernetes version number -in the format `Orchestrator=Kubernetes:x.y.z`. This value may change in each -UCP release. To find the relevant version please see the UCP -[Release Notes](../../release-notes). For example for UCP 3.1.0 the tag -would be `Orchestrator=Kubernetes:1.11.2`. -- The Azure Virtual Machine Object Name needs to match the Azure Virtual Machine -Computer Name and the Node Operating System's Hostname. Note this applies to the -FQDN of the host including domain names. -- An Azure Service Principal with `Contributor` access to the Azure Resource -Group hosting the UCP Nodes. Note, if using a separate networking Resource -Group the same Service Principal will need `Network Contributor` access to this -Resource Group. +- All UCP Nodes (Managers and Workers) need to be deployed into the same Azure + Resource Group. The Azure Networking components (Virtual Network, Subnets, + Security Groups) could be deployed in a second Azure Resource Group. +- The Azure Virtual Network and Subnet must be appropriately sized for your + environment, as addresses from this pool will be consumed by Kubernetes Pods. 
+ For more information, see [Considerations for IPAM + Configuration](#considerations-for-ipam-configuration). +- All UCP worker and manager nodes need to be attached to the same Azure + Subnet. +- The Azure Virtual Machine Object Name needs to match the Azure Virtual Machine + Computer Name and the Node Operating System's Hostname which is the FQDN of + the host, including domain names. Note that this requires all characters to be in lowercase. +- An Azure Service Principal with `Contributor` access to the Azure Resource + Group hosting the UCP Nodes. This Service Principal will be used by Kubernetes + to communicate with the Azure API. The Service Principal ID and Secret Key are + needed as part of the UCP prerequisites. If you are using a separate Resource + Group for the networking components, the same Service Principal will need + `Network Contributor` access to this Resource Group. UCP requires the following information for the installation: @@ -64,17 +59,18 @@ UCP requires the following information for the installation: objects are being deployed. - `tenantId` - The Azure Active Directory Tenant ID in which the UCP objects are being deployed. -- `aadClientId` - The Azure Service Principal ID -- `aadClientSecret` - The Azure Service Principal Secret Key +- `aadClientId` - The Azure Service Principal ID. +- `aadClientSecret` - The Azure Service Principal Secret Key. ### Azure Configuration File -For Docker UCP to integrate into Microsoft Azure, you need to place an Azure -configuration file within each UCP node in your cluster, at -`/etc/kubernetes/azure.json`. The `azure.json` file needs 0644 permissions. +For Docker UCP to integrate with Microsoft Azure, each UCP node in your cluster +needs an Azure configuration file, `azure.json`. Place the file within +`/etc/kubernetes`. Since the config file is owned by `root`, set its permissions +to `0644` to ensure the container user has read access. -See the template below. 
Note entries that do not contain `****` should not be -changed. +The following is an example template for `azure.json`. Replace `***` with real values, and leave the other +parameters as is. ``` { @@ -101,45 +97,44 @@ changed. } ``` -There are some optional values for Azure deployments: +There are some optional parameters for Azure deployments: -- `"primaryAvailabilitySetName": "****",` - The Worker Nodes availability set. -- `"vnetResourceGroup": "****",` - If your Azure Network objects live in a +- `primaryAvailabilitySetName` - The Worker Nodes availability set. +- `vnetResourceGroup` - The Virtual Network Resource group, if your Azure Network objects live in a separate resource group. -- `"routeTableName": "****",` - If you have defined multiple Route tables within +- `routeTableName` - If you have defined multiple Route tables within an Azure subnet. -More details on this configuration file can be found -[here](https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/azure/azure.go). +See [Kubernetes' azure.go](https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/azure/azure.go) for more details on this configuration file. ## Considerations for IPAM Configuration -The subnet and the virtual network associated with the primary interface of -the Azure virtual machines need to be configured with a large enough address prefix/range. -The number of required IP addresses depends on the number of pods running -on each node and the number of nodes in the cluster. +The subnet and the virtual network associated with the primary interface of the +Azure virtual machines need to be configured with a large enough address +prefix/range. The number of required IP addresses depends on the workload and +the number of nodes in the cluster. 
-For example, in a cluster of 256 nodes, to run a maximum of 128 pods -concurrently on a node, make sure that the address space of the subnet and the -virtual network can allocate at least 128 * 256 IP addresses, _in addition to_ -initial IP allocations to virtual machine NICs during Azure resource creation. +For example, in a cluster of 256 nodes, make sure that the address space of the subnet and the +virtual network can allocate at least 128 * 256 IP addresses, in order to run a maximum of 128 pods +concurrently on a node. This would be ***in addition to*** initial IP allocations to virtual machine +NICs (network interfaces) during Azure resource creation. Accounting for IP addresses that are allocated to NICs during virtual machine bring-up, set -the address space of the subnet and virtual network to 10.0.0.0/16. This +the address space of the subnet and virtual network to `10.0.0.0/16`. This ensures that the network can dynamically allocate at least 32768 addresses, plus a buffer for initial allocations for primary IP addresses. > Azure IPAM, UCP, and Kubernetes > > The Azure IPAM module queries an Azure virtual machine's metadata to obtain -> a list of IP addresses that are assigned to the virtual machine's NICs. The +> a list of IP addresses which are assigned to the virtual machine's NICs. The > IPAM module allocates these IP addresses to Kubernetes pods. You configure the > IP addresses as `ipConfigurations` in the NICs associated with a virtual machine > or scale set member, so that Azure IPAM can provide them to Kubernetes when > requested. {: .important} -## Manually provision IP address as part of an Azure virtual machine scale set +## Manually provision IP address pools as part of an Azure virtual machine scale set Configure IP Pools for each member of the virtual machine scale set during provisioning by associating multiple `ipConfigurations` with the scale set’s @@ -200,24 +195,112 @@ for each virtual machine in the virtual machine scale set. 
} ``` -## Install UCP +## UCP Installation -Use the following command to install UCP on the manager node. -The `--pod-cidr` option maps to the IP address range that you configured for -the subnets in the previous sections, and the `--host-address` maps to the -IP address of the master node. +### Adjust the IP Count Value + +If you have manually attached additional IP addresses to the Virtual Machines +(via an ARM Template, Azure CLI or Azure Portal) or you want to reduce the +number of IP Addresses automatically provisioned by UCP from the default of 128 +addresses, you can alter the `azure_ip_count` variable in the UCP +Configuration file before installation. If you are happy with 128 addresses per +Virtual Machine, proceed to [installing UCP](#install-ucp). + +Once UCP has been installed, the UCP [configuration +file](../configure/ucp-configuration-file/) is managed by UCP and populated with +all of the cluster configuration data, such as AD/LDAP information or networking +configuration. As there is no Universal Control Plane deployed yet, we are able +to stage a [configuration file](../configure/ucp-configuration-file/) just +containing the Azure IP Count value. UCP will populate the rest of the cluster +variables during and after the installation. + +Below are some example configuration files with just the `azure_ip_count` +variable defined. These 3-line files can be preloaded into a Docker Swarm prior +to installing UCP in order to override the default `azure_ip_count` value of 128 IP +addresses per node. See [UCP configuration file](../configure/ucp-configuration-file/) +to learn more about the configuration file, and other variables that can be staged pre-install. + +> Note: Do not set the `azure_ip_count` to a value of less than 6 if you have not +> manually provisioned additional IP addresses for each Virtual Machine. The UCP +> installation will need at least 6 IP addresses to allocate to the core UCP components +> that run as Kubernetes pods. 
That is in addition to the Virtual +> Machine's private IP address. + +If you have manually provisioned additional IP addresses for each Virtual +Machine, and want to disallow UCP from dynamically provisioning IP +addresses for you, then your UCP configuration file would be: + +``` +$ vi example-config-1 +[cluster_config] + azure_ip_count = "0" +``` + +If you want to reduce the IP addresses dynamically allocated from 128 to a +custom value, then your UCP configuration file would be: + +``` +$ vi example-config-2 +[cluster_config] + azure_ip_count = "20" # This value may be different for your environment +``` +See [Considerations for IPAM +Configuration](#considerations-for-ipam-configuration) to calculate an +appropriate value. + +To preload this configuration file prior to installing UCP: + +1. Copy the configuration file to a Virtual Machine that you wish to become a UCP Manager Node. + +2. Initiate a Swarm on that Virtual Machine. + + ``` + $ docker swarm init + ``` + +3. Upload the configuration file to the Swarm, by using a [Docker Swarm Config](/engine/swarm/configs/). +This Swarm Config will need to be named `com.docker.ucp.config`. + ``` + $ docker config create com.docker.ucp.config <path-to-config-file> + ``` + +4. Check that the configuration has been loaded successfully. + ``` + $ docker config list + ID NAME CREATED UPDATED + igca3q30jz9u3e6ecq1ckyofz com.docker.ucp.config 1 days ago 1 days ago + ``` + +5. You are now ready to [install UCP](#install-ucp). As you have already staged + a UCP configuration file, you will need to add `--existing-config` to the + install command below. + +If you need to adjust this value post-installation, see [instructions](../configure/ucp-configuration-file/) +on how to download the UCP configuration file, change the value, and update the configuration via the API. +If you reduce the value post-installation, existing virtual machines will not be +reconciled, and you will have to manually edit the IP count in Azure. 
+ +### Install UCP + +Run the following command to install UCP on a manager node. The `--pod-cidr` +option maps to the IP address range that you have configured for the Azure +subnet, and the `--host-address` maps to the private IP address of the master +node. Finally, if you have set the [IP Count +Value](#adjust-the-ip-count-value) you will need to add `--existing-config` +to the install command below. + +> Note: The `pod-cidr` range must match the Azure Virtual Network's Subnet +> attached to the hosts. For example, if the Azure Virtual Network had the range +> `172.0.0.0/16` with Virtual Machines provisioned on an Azure Subnet of +> `172.0.1.0/24`, then the Pod CIDR should also be `172.0.1.0/24`. ```bash docker container run --rm -it \ --name ucp \ - -v /var/run/docker.sock:/var/run/docker.sock \ + --volume /var/run/docker.sock:/var/run/docker.sock \ {{ page.ucp_org }}/{{ page.ucp_repo }}:{{ page.ucp_version }} install \ --host-address \ --pod-cidr \ --cloud-provider Azure \ --interactive ``` - -#### Additional Notes - -- The Kubernetes `pod-cidr` must match the Azure Vnet of the hosts. diff --git a/ee/ucp/admin/install/system-requirements.md b/ee/ucp/admin/install/system-requirements.md index 373e7665d9..9cf6e7dc40 100644 --- a/ee/ucp/admin/install/system-requirements.md +++ b/ee/ucp/admin/install/system-requirements.md @@ -15,16 +15,18 @@ You can install UCP on-premises or on a cloud provider. Common requirements: * [Docker EE Engine](/ee/supported-platforms.md) version {{ site.docker_ee_version }} * Linux kernel version 3.10 or higher -* A static IP address - +* [A static IP address for each node in the cluster](/ee/ucp/admin/install/plan-installation/#static-ip-addresses) + ### Minimum requirements * 8GB of RAM for manager nodes * 4GB of RAM for worker nodes * 2 vCPUs for manager nodes -* 4GB of free disk space for the `/var` partition for manager nodes +* 5GB of free disk space for the `/var` partition for manager nodes (A minimum of 6GB is recommended.) 
* 500MB of free disk space for the `/var` partition for worker nodes +**Note**: Increased storage is required for Kubernetes manager nodes in UCP 3.1. If you are upgrading to UCP 3.1, refer to [Kubelet restarting after upgrade to Universal Control Plane 3.1](https://success.docker.com/article/kublet-restarting-after-upgrade-to-universal-control-plane-31) for information on how to increase the size of the `/var/lib/kubelet` filesystem. + ### Recommended production requirements * 16GB of RAM for manager nodes diff --git a/ee/ucp/admin/install/upgrade.md b/ee/ucp/admin/install/upgrade.md index 15d8a4f15a..e7e728af4a 100644 --- a/ee/ucp/admin/install/upgrade.md +++ b/ee/ucp/admin/install/upgrade.md @@ -106,6 +106,17 @@ for required configuration values. Once the upgrade finishes, navigate to the UCP web interface and make sure that all the nodes managed by UCP are healthy. +## Recommended upgrade paths + +From UCP 3.0: UCP 3.0 -> UCP 3.1 +From UCP 2.2: UCP 2.2 -> UCP 3.0 +From UCP 2.1: UCP 2.1 -> UCP 2.2 + +If you’re running a UCP version earlier than 2.1, first upgrade to the latest 2.1 version, then upgrade to 2.2. Use these rules for your upgrade path to UCP 2.2: + +From UCP 1.1: UCP 1.1 -> UCP 2.1 -> UCP 2.2 +From UCP 2.0: UCP 2.0 -> UCP 2.1 -> UCP 2.2 + ## Where to go next - [UCP release notes](../../release-notes.md) diff --git a/ee/ucp/admin/monitor-and-troubleshoot/troubleshoot-with-logs.md b/ee/ucp/admin/monitor-and-troubleshoot/troubleshoot-with-logs.md index fe1f7cf6c3..5dc9fe8cd5 100644 --- a/ee/ucp/admin/monitor-and-troubleshoot/troubleshoot-with-logs.md +++ b/ee/ucp/admin/monitor-and-troubleshoot/troubleshoot-with-logs.md @@ -20,6 +20,7 @@ containers to be listed as well. Click on a container to see more details, like its configurations and logs. +![](../../images/troubleshoot-with-logs-2.png){: .with-border} ## Check the logs from the CLI @@ -73,7 +74,7 @@ applications won't be affected by this. 
To increase the UCP log level, navigate to the UCP web UI, go to the **Admin Settings** tab, and choose **Logs**. -![](../../images/troubleshoot-with-logs-2.png){: .with-border} +![](../../images/troubleshoot-with-logs-3.png){: .with-border} Once you change the log level to **Debug** the UCP containers restart. Now that the UCP components are creating more descriptive logs, you can diff --git a/ee/ucp/authorization/_site/create-teams-with-ldap.html b/ee/ucp/authorization/_site/create-teams-with-ldap.html deleted file mode 100644 index 2868eee2d1..0000000000 --- a/ee/ucp/authorization/_site/create-teams-with-ldap.html +++ /dev/null @@ -1,55 +0,0 @@ -

    To enable LDAP in UCP and sync to your LDAP directory:

    - -
      -
    1. Click Admin Settings under your username drop down.
    2. -
    3. Click Authentication & Authorization.
    4. -
    5. Scroll down and click Yes by LDAP Enabled. A list of LDAP settings displays.
    6. -
    7. Input values to match your LDAP server installation.
    8. -
    9. Test your configuration in UCP.
    10. -
    11. Manually create teams in UCP to mirror those in LDAP.
    12. -
    13. Click Sync Now.
    14. -
    - -

    If Docker EE is configured to sync users with your organization’s LDAP directory -server, you can enable syncing the new team’s members when creating a new team -or when modifying settings of an existing team.

    - -

    For more, see: Integrate with an LDAP Directory.

    - -

    - -

    Binding to the LDAP server

    - -

    There are two methods for matching group members from an LDAP directory, direct -bind and search bind.

    - -

    Select Immediately Sync Team Members to run an LDAP sync operation -immediately after saving the configuration for the team. It may take a moment -before the members of the team are fully synced.

    - -

    Match Group Members (Direct Bind)

    - -

    This option specifies that team members should be synced directly with members -of a group in your organization’s LDAP directory. The team’s membership will by -synced to match the membership of the group.

    - -
      -
    • Group DN: The distinguished name of the group from which to select users.
    • -
    • Group Member Attribute: The value of this group attribute corresponds to -the distinguished names of the members of the group.
    • -
    - -

    Match Search Results (Search Bind)

    - -

    This option specifies that team members should be synced using a search query -against your organization’s LDAP directory. The team’s membership will be -synced to match the users in the search results.

    - -
      -
    • Search Base DN: Distinguished name of the node in the directory tree where -the search should start looking for users.
    • -
    • Search Filter: Filter to find users. If null, existing users in the search -scope are added as members of the team.
    • -
    • Search subtree: Defines search through the full LDAP tree, not just one -level, starting at the Base DN.
    • -
    diff --git a/ee/ucp/authorization/_site/create-users-and-teams-manually.html b/ee/ucp/authorization/_site/create-users-and-teams-manually.html deleted file mode 100644 index 34e4790ce4..0000000000 --- a/ee/ucp/authorization/_site/create-users-and-teams-manually.html +++ /dev/null @@ -1,106 +0,0 @@ -

    Users, teams, and organizations are referred to as subjects in Docker EE.

    - -

    Individual users can belong to one or more teams but each team can only be in -one organization. At the fictional startup, Acme Company, all teams in the -organization are necessarily unique but the user, Alex, is on two teams:

    - -
    acme-datacenter
    -├── dba
    -│   └── Alex*
    -├── dev
    -│   └── Bett
    -└── ops
    -    ├── Alex*
    -    └── Chad
    -
    - -

    Authentication

    - -

    All users are authenticated on the backend. Docker EE provides built-in -authentication and also integrates with LDAP directory services.

    - -

    To use Docker EE’s built-in authentication, you must create users manually.

    - -
    -

    To enable LDAP and authenticate and synchronize UCP users and teams with your -organization’s LDAP directory, see:

    - -
    - -

    Build an organization architecture

    - -

    The general flow of designing an organization with teams in UCP is:

    - -
      -
    1. Create an organization.
    2. -
    3. Add users or enable LDAD (for syncing users).
    4. -
    5. Create teams under the organization.
    6. -
    7. Add users to teams manually or sync with LDAP.
    8. -
    - -

    Create an organization with teams

    - -

    To create an organization in UCP:

    - -
      -
    1. Click Organization & Teams under User Management.
    2. -
    3. Click Create Organization.
    4. -
    5. Input the organization name.
    6. -
    7. Click Create.
    8. -
    - -

    To create teams in the organization:

    - -
      -
    1. Click through the organization name.
    2. -
    3. Click Create Team.
    4. -
    5. Input a team name (and description).
    6. -
    7. Click Create.
    8. -
    9. Add existing users to the team. To sync LDAP users, see: Integrate with an LDAP Directory. -
        -
      • Click the team name and select Actions > Add Users.
      • -
      • Check the users to include and click Add Users.
      • -
      -
    10. -
    - -
    -

    Note: To sync teams with groups in an LDAP server, see Sync Teams with LDAP.

    -
    - -

    Create users manually

    - -

    New users are assigned a default permission level so that they can access the -cluster. To extend a user’s default permissions, add them to a team and create grants. You can optionally grant them Docker EE -administrator permissions.

    - -

    To manually create users in UCP:

    - -
      -
    1. Click Users under User Management.
    2. -
    3. Click Create User.
    4. -
    5. Input username, password, and full name.
    6. -
    7. Click Create.
    8. -
    9. Optionally, check “Is a Docker EE Admin” to give the user administrator -privileges.
    10. -
    - -
    -

    A Docker EE Admin can grant users permission to change the cluster -configuration and manage grants, roles, and resource sets.

    -
    - -

    -

    - -

    Where to go next

    - - diff --git a/ee/ucp/authorization/_site/define-roles.html b/ee/ucp/authorization/_site/define-roles.html deleted file mode 100644 index 7fe6660603..0000000000 --- a/ee/ucp/authorization/_site/define-roles.html +++ /dev/null @@ -1,77 +0,0 @@ -

    A role defines a set of API operations permitted against a resource set. -You apply roles to users and teams by creating grants.

    - -

    Diagram showing UCP permission levels

    - -

    Default roles

    - -

    You can define custom roles or use the following built-in roles:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Built-in roleDescription
    NoneUsers have no access to Swarm or Kubernetes resources. Maps to No Access role in UCP 2.1.x.
    View OnlyUsers can view resources but can’t create them.
    Restricted ControlUsers can view and edit resources but can’t run a service or container in a way that affects the node where it’s running. Users cannot mount a node directory, exec into containers, or run containers in privileged mode or with additional kernel capabilities.
    SchedulerUsers can view nodes (worker and manager) and schedule (not view) workloads on these nodes. By default, all users are granted the Scheduler role against the /Shared collection. (To view workloads, users need permissions such as Container View).
    Full ControlUsers can view and edit all granted resources. They can create containers without any restriction, but can’t see the containers of other users.
    - -

    Create a custom role

    - -

    The Roles page lists all default and custom roles applicable in the -organization.

    - -

    You can give a role a global name, such as “Remove Images”, which might enable the -Remove and Force Remove operations for images. You can apply a role with -the same name to different resource sets.

    - -
      -
    1. Click Roles under User Management.
    2. -
    3. Click Create Role.
    4. -
    5. Input the role name on the Details page.
    6. -
    7. Click Operations. All available API operations are displayed.
    8. -
    9. Select the permitted operations per resource type.
    10. -
    11. Click Create.
    12. -
    - -

    - -
    -

    Some important rules regarding roles:

    -
      -
    • Roles are always enabled.
    • -
    • Roles can’t be edited. To edit a role, you must delete and recreate it.
    • -
    • Roles used within a grant can be deleted only after first deleting the grant.
    • -
    • Only administrators can create and delete roles.
    • -
    -
    - -

    Where to go next

    - - diff --git a/ee/ucp/authorization/_site/deploy-stateless-app.html b/ee/ucp/authorization/_site/deploy-stateless-app.html deleted file mode 100644 index eaafe2dcfa..0000000000 --- a/ee/ucp/authorization/_site/deploy-stateless-app.html +++ /dev/null @@ -1,191 +0,0 @@ -

    This tutorial explains how to deploy a NGINX web server and limit access to one -team with role-based access control (RBAC).

    - -

    Scenario

    - -

    You are the Docker EE system administrator at Acme Company and need to configure -permissions to company resources. The best way to do this is to:

    - -
      -
    • Build the organization with teams and users.
    • -
    • Define roles with allowable operations per resource types, like -permission to run containers.
    • -
    • Create collections or namespaces for accessing actual resources.
    • -
    • Create grants that join team + role + resource set.
    • -
    - -

    Build the organization

    - -

    Add the organization, acme-datacenter, and create three teams according to the -following structure:

    - -
    acme-datacenter
    -├── dba
    -│   └── Alex Alutin
    -├── dev
    -│   └── Bett Bhatia
    -└── ops
    -    └── Chad Chavez
    -
    - -

    Learn to create and configure users and teams.

    - -

    Kubernetes deployment

    - -

    In this section, we deploy NGINX with Kubernetes. See Swarm stack -for the same exercise with Swarm.

    - -

    Create namespace

    - -

    Create a namespace to logically store the NGINX application:

    - -
      -
    1. Click Kubernetes > Namespaces.
    2. -
    3. Paste the following manifest in the terminal window and click Create.
    4. -
    - -
    apiVersion: v1
    -kind: Namespace
    -metadata:
    -  name: nginx-namespace
    -
    - -

    Define roles

    - -

    You can use the built-in roles or define your own. For this exercise, create a -simple role for the ops team:

    - -
      -
    1. Click Roles under User Management.
    2. -
    3. Click Create Role.
    4. -
    5. On the Details tab, name the role Kube Deploy.
    6. -
    7. On the Operations tab, check all Kubernetes Deployment Operations.
    8. -
    9. Click Create.
    10. -
    - -

    Learn to create and configure users and teams.

    - -

    Grant access

    - -

    Grant the ops team (and only the ops team) access to nginx-namespace with the -custom role, Kube Deploy.

    - -
    acme-datacenter/ops + Kube Deploy + nginx-namespace
    -
    - -

    Deploy NGINX

    - -

    You’ve configured Docker EE. The ops team can now deploy nginx.

    - -
      -
    1. Log on to UCP as “chad” (on the opsteam).
    2. -
    3. Click Kubernetes > Namespaces.
    4. -
    5. Paste the following manifest in the terminal window and click Create.
    6. -
    - -
    apiVersion: apps/v1beta2  # Use apps/v1beta1 for versions < 1.8.0
    -kind: Deployment
    -metadata:
    -  name: nginx-deployment
    -spec:
    -  selector:
    -    matchLabels:
    -      app: nginx
    -  replicas: 2
    -  template:
    -    metadata:
    -      labels:
    -        app: nginx
    -    spec:
    -      containers:
    -      - name: nginx
    -        image: nginx:latest
    -        ports:
    -        - containerPort: 80
    -
    - -
      -
    1. Log on to UCP as each user and ensure that: -
        -
      • dba (alex) can’t see nginx-namespace.
      • -
      • dev (bett) can’t see nginx-namespace.
      • -
      -
    2. -
    - -

    Swarm stack

    - -

    In this section, we deploy nginx as a Swarm service. See Kubernetes Deployment -for the same exercise with Kubernetes.

    - -

    Create collection paths

    - -

    Create a collection for NGINX resources, nested under the /Shared collection:

    - -
    /
    -├── System
    -└── Shared
    -    └── nginx-collection
    -
    - -
    -

    Tip: To drill into a collection, click View Children.

    -
    - -

    Learn to group and isolate cluster resources.

    - -

    Define roles

    - -

    You can use the built-in roles or define your own. For this exercise, create a -simple role for the ops team:

    - -
      -
    1. Click Roles under User Management.
    2. -
    3. Click Create Role.
    4. -
    5. On the Details tab, name the role Swarm Deploy.
    6. -
    7. On the Operations tab, check all Service Operations.
    8. -
    9. Click Create.
    10. -
    - -

    Learn to create and configure users and teams.

    - -

    Grant access

    - -

    Grant the ops team (and only the ops team) access to nginx-collection with -the built-in role, Swarm Deploy.

    - -
    acme-datacenter/ops + Swarm Deploy + /Shared/nginx-collection
    -
    - -

    Learn to grant role-access to cluster resources.

    - -

    Deploy NGINX

    - -

    You’ve configured Docker EE. The ops team can now deploy an nginx Swarm -service.

    - -
      -
    1. Log on to UCP as chad (on the opsteam).
    2. -
    3. Click Swarm > Services.
    4. -
    5. Click Create Stack.
    6. -
    7. On the Details tab, enter: -
        -
      • Name: nginx-service
      • -
      • Image: nginx:latest
      • -
      -
    8. -
    9. On the Collections tab: -
        -
      • Click /Shared in the breadcrumbs.
      • -
      • Select nginx-collection.
      • -
      -
    10. -
    11. Click Create.
    12. -
    13. Log on to UCP as each user and ensure that: -
        -
      • dba (alex) cannot see nginx-collection.
      • -
      • dev (bett) cannot see nginx-collection.
      • -
      -
    14. -
    - diff --git a/ee/ucp/authorization/_site/ee-advanced.html b/ee/ucp/authorization/_site/ee-advanced.html deleted file mode 100644 index 44cb8d7cf6..0000000000 --- a/ee/ucp/authorization/_site/ee-advanced.html +++ /dev/null @@ -1,137 +0,0 @@ -

    Go through the Docker Enterprise Standard tutorial, -before continuing here with Docker Enterprise Advanced.

    - -

    In the first tutorial, the fictional company, OrcaBank, designed an architecture -with role-based access control (RBAC) to meet their organization’s security -needs. They assigned multiple grants to fine-tune access to resources across -collection boundaries on a single platform.

    - -

    In this tutorial, OrcaBank implements new and more stringent security -requirements for production applications:

    - -

    First, OrcaBank adds staging zone to their deployment model. They will no longer -move developed appliciatons directly in to production. Instead, they will deploy -apps from their dev cluster to staging for testing, and then to production.

    - -

    Second, production applications are no longer permitted to share any physical -infrastructure with non-production infrastructure. OrcaBank segments the -scheduling and access of applications with Node Access Control.

    - -
    -

    Node Access Control is a feature of Docker EE -Advanced and provides secure multi-tenancy with node-based isolation. Nodes -can be placed in different collections so that resources can be scheduled and -isolated on disparate physical or virtual hardware resources.

    -
    - -

    Team access requirements

    - -

    OrcaBank still has three application teams, payments, mobile, and db with -varying levels of segmentation between them.

    - -

    Their RBAC redesign is going to organize their UCP cluster into two top-level -collections, staging and production, which are completely separate security -zones on separate physical infrastructure.

    - -

    OrcaBank’s four teams now have different needs in production and staging:

    - -
      -
    • security should have view-only access to all applications in production (but -not staging).
    • -
    • db should have full access to all database applications and resources in -production (but not staging). See DB Team.
    • -
    • mobile should have full access to their Mobile applications in both -production and staging and limited access to shared db services. See -Mobile Team.
    • -
    • payments should have full access to their Payments applications in both -production and staging and limited access to shared db services.
    • -
    - -

    Role composition

    - -

    OrcaBank has decided to replace their custom Ops role with the built-in -Full Control role.

    - -
      -
    • View Only (default role) allows users to see but not edit all cluster -resources.
    • -
    • Full Control (default role) allows users complete control of all collections -granted to them. They can also create containers without restriction but -cannot see the containers of other users.
    • -
    • View & Use Networks + Secrets (custom role) enables users to view/connect -to networks and view/use secrets used by db containers, but prevents them -from seeing or impacting the db applications themselves.
    • -
    - -

    image

    - -

    Collection architecture

    - -

    In the previous tutorial, OrcaBank created separate collections for each -application team and nested them all under /Shared.

    - -

    To meet their new security requirements for production, OrcaBank is redesigning -collections in two ways:

    - -
      -
    • Adding collections for both the production and staging zones, and nesting a -set of application collections under each.
    • -
    • Segmenting nodes. Both the production and staging zones will have dedicated -nodes; and in production, each application will be on a dedicated node.
    • -
    - -

    The collection architecture now has the following tree representation:

    - -
    /
    -├── System
    -├── Shared
    -├── prod
    -│   ├── mobile
    -│   ├── payments
    -│   └── db
    -│       ├── mobile
    -│       └── payments
    -|
    -└── staging
    -    ├── mobile
    -    └── payments
    -
    - -

    Grant composition

    - -

    OrcaBank must now diversify their grants further to ensure the proper division -of access.

    - -

    The payments and mobile application teams will have three grants each–one -for deploying to production, one for deploying to staging, and the same grant to -access shared db networks and secrets.

    - -

    image

    - -

    OrcaBank access architecture

    - -

    The resulting access architecture, designed with Docker EE Advanced, provides -physical segmentation between production and staging using node access control.

    - -

    Applications are scheduled only on UCP worker nodes in the dedicated application -collection. And applications use shared resources across collection boundaries -to access the databases in the /prod/db collection.

    - -

    image

    - -

    DB team

    - -

    The OrcaBank db team is responsible for deploying and managing the full -lifecycle of the databases that are in production. They have the full set of -operations against all database resources.

    - -

    image

    - -

    Mobile team

    - -

    The mobile team is responsible for deploying their full application stack in -staging. In production they deploy their own applications but use the databases -that are provided by the db team.

    - -

    image

    - diff --git a/ee/ucp/authorization/_site/ee-standard.html b/ee/ucp/authorization/_site/ee-standard.html deleted file mode 100644 index 791cd3271b..0000000000 --- a/ee/ucp/authorization/_site/ee-standard.html +++ /dev/null @@ -1,137 +0,0 @@ -

    Collections and grants are strong tools that can be used to control -access and visibility to resources in UCP.

    - -

    This tutorial describes a fictitious company named OrcaBank that needs to -configure an architecture in UCP with role-based access control (RBAC) for -their application engineering group.

    - -

    Team access requirements

    - -

    OrcaBank reorganized their application teams by product with each team providing -shared services as necessary. Developers at OrcaBank do their own DevOps and -deploy and manage the lifecycle of their applications.

    - -

    OrcaBank has four teams with the following resource needs:

    - -
      -
    • security should have view-only access to all applications in the cluster.
    • -
    • db should have full access to all database applications and resources. See -DB Team.
    • -
    • mobile should have full access to their mobile applications and limited -access to shared db services. See Mobile Team.
    • -
    • payments should have full access to their payments applications and limited -access to shared db services.
    • -
    - -

    Role composition

    - -

    To assign the proper access, OrcaBank is employing a combination of default -and custom roles:

    - -
      -
    • View Only (default role) allows users to see all resources (but not edit or use).
    • -
    • Ops (custom role) allows users to perform all operations against configs, -containers, images, networks, nodes, secrets, services, and volumes.
    • -
    • View & Use Networks + Secrets (custom role) enables users to view/connect to -networks and view/use secrets used by db containers, but prevents them from -seeing or impacting the db applications themselves.
    • -
    - -

    image

    - -

    Collection architecture

    - -

    OrcaBank is also creating collections of resources to mirror their team -structure.

    - -

    Currently, all OrcaBank applications share the same physical resources, so all -nodes and applications are being configured in collections that nest under the -built-in collection, /Shared.

    - -

    Other collections are also being created to enable shared db applications.

    - -
    -

    Note: For increased security with node-based isolation, use Docker -Enterprise Advanced.

    -
    - -
      -
    • /Shared/mobile hosts all Mobile applications and resources.
    • -
    • /Shared/payments hosts all Payments applications and resources.
    • -
    • /Shared/db is a top-level collection for all db resources.
    • -
    • /Shared/db/payments is a collection of db resources for Payments applications.
    • -
    • /Shared/db/mobile is a collection of db resources for Mobile applications.
    • -
    - -

    The collection architecture has the following tree representation:

    - -
    /
    -├── System
    -└── Shared
    -    ├── mobile
    -    ├── payments
    -    └── db
    -        ├── mobile
    -        └── payments
    -
    - -

    OrcaBank’s Grant composition ensures that their collection -architecture gives the db team access to all db resources and restricts -app teams to shared db resources.

    - -

    LDAP/AD integration

    - -

    OrcaBank has standardized on LDAP for centralized authentication to help their -identity team scale across all the platforms they manage.

    - -

    To implement LDAP authentication in UCP, OrcaBank is using UCP’s native LDAP/AD -integration to map LDAP groups directly to UCP teams. Users can be added to or -removed from UCP teams via LDAP which can be managed centrally by OrcaBank’s -identity team.

    - -

    The following grant composition shows how LDAP groups are mapped to UCP teams.

    - -

    Grant composition

    - -

    OrcaBank is taking advantage of the flexibility in UCP’s grant model by applying -two grants to each application team. One grant allows each team to fully -manage the apps in their own collection, and the second grant gives them the -(limited) access they need to networks and secrets within the db collection.

    - -

    image

    - -

    OrcaBank access architecture

    - -

    OrcaBank’s resulting access architecture shows applications connecting across -collection boundaries. By assigning multiple grants per team, the Mobile and -Payments applications teams can connect to dedicated Database resources through -a secure and controlled interface, leveraging Database networks and secrets.

    - -
    -

    Note: In Docker Enterprise Standard, all resources are deployed across the -same group of UCP worker nodes. Node segmentation is provided in Docker -Enterprise Advanced and discussed in the next tutorial.

    -
    - -

    image

    - -

    DB team

    - -

    The db team is responsible for deploying and managing the full lifecycle -of the databases used by the application teams. They can execute the full set of -operations against all database resources.

    - -

    image

    - -

    Mobile team

    - -

    The mobile team is responsible for deploying their own application stack, -minus the database tier that is managed by the db team.

    - -

    image

    - -

    Where to go next

    - - diff --git a/ee/ucp/authorization/_site/grant-permissions.html b/ee/ucp/authorization/_site/grant-permissions.html deleted file mode 100644 index 17bec8115c..0000000000 --- a/ee/ucp/authorization/_site/grant-permissions.html +++ /dev/null @@ -1,77 +0,0 @@ -

    Docker EE administrators can create grants to control how users and -organizations access resource sets.

    - -

    A grant defines who has how much access to what resources. Each grant is a -1:1:1 mapping of subject, role, and resource set. For example, you can -grant the “Prod Team” “Restricted Control” over services in the “/Production” -collection.

    - -

    A common workflow for creating grants has four steps:

    - -
      -
    • Add and configure subjects (users, teams, and service accounts).
    • -
    • Define custom roles (or use defaults) by adding permitted API operations -per type of resource.
    • -
    • Group cluster resources into Swarm collections or Kubernetes namespaces.
    • -
    • Create grants by combining subject + role + resource set.
    • -
    - -

    Kubernetes grants

    - -

    With Kubernetes orchestration, a grant is made up of subject, role, and -namespace.

    - -
    -

    This section assumes that you have created objects for the grant: subject, role, -namespace.

    -
    - -

    To create a Kubernetes grant in UCP:

    - -
      -
    1. Click Grants under User Management.
    2. -
    3. Click Create Grant.
    4. -
    5. Click Namespaces under Kubernetes.
    6. -
    7. Find the desired namespace and click Select Namespace.
    8. -
    9. On the Roles tab, select a role.
    10. -
    11. On the Subjects tab, select a user, team, organization, or service -account to authorize.
    12. -
    13. Click Create.
    14. -
    - -

    Swarm grants

    - -

    With Swarm orchestration, a grant is made up of subject, role, and -collection.

    - -
    -

    This section assumes that you have created objects to grant: teams/users, -roles (built-in or custom), and a collection.

    -
    - -

    -

    - -

    To create a grant in UCP:

    - -
      -
    1. Click Grants under User Management.
    2. -
    3. Click Create Grant.
    4. -
    5. On the Collections tab, click Collections (for Swarm).
    6. -
    7. Click View Children until you get to the desired collection and Select.
    8. -
    9. On the Roles tab, select a role.
    10. -
    11. On the Subjects tab, select a user, team, or organization to authorize.
    12. -
    13. Click Create.
    14. -
    - -
    -

    By default, all new users are placed in the docker-datacenter organization. -To apply permissions to all Docker EE users, create a grant with the -docker-datacenter org as a subject.

    -
    - -

    Where to go next

    - - diff --git a/ee/ucp/authorization/_site/group-resources.html b/ee/ucp/authorization/_site/group-resources.html deleted file mode 100644 index 1ce779c9f8..0000000000 --- a/ee/ucp/authorization/_site/group-resources.html +++ /dev/null @@ -1,136 +0,0 @@ -

    Docker EE enables access control to cluster resources by grouping resources -into resource sets. Combine resource sets with grants -to give users permission to access specific cluster resources.

    - -

    A resource set can be:

    - -
      -
    • A Kubernetes namespace for Kubernetes workloads.
    • -
    • A UCP collection for Swarm workloads.
    • -
    - -

    Kubernetes namespaces

    - -

    A namespace allows you to group resources like Pods, Deployments, Services, or -any other Kubernetes-specific resources. You can then enforce RBAC policies -and resource quotas for the namespace.

    - -

    Each Kubernetes resource can only be in one namespace, and namespaces cannot -be nested inside one another.

    - -

    Learn more about Kubernetes namespaces.

    - -

    Swarm collections

    - -

    A Swarm collection is a directory of cluster resources like nodes, services, -volumes, or other Swarm-specific resources.

    - -

    - -

    Each Swarm resource can only be in one collection at a time, but collections -can be nested inside one another, to create hierarchies.

    - -

    Nested collections

    - -

    You can nest collections inside one another. If a user is granted permissions -for one collection, they’ll have permissions for its child collections, -much like a directory structure.

    - -

    For a child collection, or for a user who belongs to more than one team, the -system concatenates permissions from multiple roles into an “effective role” for -the user, which specifies the operations that are allowed against the target.

    - -

    Built-in collections

    - -

    Docker EE provides a number of built-in collections.

    - -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Default collectionDescription
    /Path to all resources in the Swarm cluster. Resources not in a collection are put here.
    /SystemPath to UCP managers, DTR nodes, and UCP/DTR system services. By default, only admins have access, but this is configurable.
    /SharedDefault path to all worker nodes for scheduling. In Docker EE Standard, all worker nodes are located here. In Docker EE Advanced, worker nodes can be moved and isolated.
    /Shared/Private/Path to a user’s private collection.
    /Shared/LegacyPath to the access control labels of legacy versions (UCP 2.1 and lower).
    - -

    Default collections

    - -

    Each user has a default collection which can be changed in UCP preferences.

    - -

    Users can’t deploy a resource without a collection. When a user deploys a -resource without an access label, Docker EE automatically places the resource in -the user’s default collection. Learn how to add labels to nodes.

    - -

    With Docker Compose, the system applies default collection labels across all -resources in the stack unless com.docker.ucp.access.label has been explicitly -set.

    - -
    -

    Default collections and collection labels

    - -

    Default collections are good for users who work only on a well-defined slice of -the system, as well as users who deploy stacks and don’t want to edit the -contents of their compose files. A user with more versatile roles in the -system, such as an administrator, might find it better to set custom labels for -each resource.

    -
    - -

    Collections and labels

    - -

    Resources are marked as being in a collection by using labels. Some resource -types don’t have editable labels, so you can’t move them across collections.

    - -
    -

    Can edit labels: services, nodes, secrets, and configs -Cannot edit labels: containers, networks, and volumes

    -
    - -

    For editable resources, you can change the com.docker.ucp.access.label to move -resources to different collections. For example, you may need deploy resources -to a collection other than your default collection.

    - -

    The system uses the additional labels, com.docker.ucp.collection.*, to enable -efficient resource lookups. By default, nodes have the -com.docker.ucp.collection.root, com.docker.ucp.collection.shared, and -com.docker.ucp.collection.swarm labels set to true. UCP -automatically controls these labels, and you don’t need to manage them.

    - -

    Collections get generic default names, but you can give them meaningful names, -like “Dev”, “Test”, and “Prod”.

    - -

    A stack is a group of resources identified by a label. You can place the -stack’s resources in multiple collections. Resources are placed in the user’s -default collection unless you specify an explicit com.docker.ucp.access.label -within the stack/compose file.

    - -

    Where to go next

    - - diff --git a/ee/ucp/authorization/_site/index.html b/ee/ucp/authorization/_site/index.html deleted file mode 100644 index bd8fc3f9c3..0000000000 --- a/ee/ucp/authorization/_site/index.html +++ /dev/null @@ -1,110 +0,0 @@ -

    Docker Universal Control Plane (UCP), -the UI for Docker EE, lets you -authorize users to view, edit, and use cluster resources by granting role-based -permissions against resource sets.

    - -

    To authorize access to cluster resources across your organization, UCP -administrators might take the following high-level steps:

    - -
      -
    • Add and configure subjects (users, teams, and service accounts).
    • -
    • Define custom roles (or use defaults) by adding permitted operations per -type of resource.
    • -
    • Group cluster resources into resource sets of Swarm collections or -Kubernetes namespaces.
    • -
    • Create grants by combining subject + role + resource set.
    • -
    - -

    For an example, see Deploy stateless app with RBAC.

    - -

    Subjects

    - -

    A subject represents a user, team, organization, or service account. A subject -can be granted a role that defines permitted operations against one or more -resource sets.

    - -
      -
    • User: A person authenticated by the authentication backend. Users can -belong to one or more teams and one or more organizations.
    • -
    • Team: A group of users that share permissions defined at the team level. A -team can be in one organization only.
    • -
    • Organization: A group of teams that share a specific set of permissions, -defined by the roles of the organization.
    • -
    • Service account: A Kubernetes object that enables a workload to access -cluster resources that are assigned to a namespace.
    • -
    - -

    Learn to create and configure users and teams.

    - -

    Roles

    - -

    Roles define what operations can be done by whom. A role is a set of permitted -operations against a type of resource, like a container or volume, that’s -assigned to a user or team with a grant.

    - -

    For example, the built-in role, Restricted Control, includes permission to -view and schedule nodes but not to update nodes. A custom DBA role might -include permissions to r-w-x volumes and secrets.

    - -

    Most organizations use multiple roles to fine-tune the appropriate access. A -given team or user may have different roles provided to them depending on what -resource they are accessing.

    - -

    Learn to define roles with authorized API operations.

    - -

    Resource sets

    - -

    To control user access, cluster resources are grouped into Docker Swarm -collections or Kubernetes namespaces.

    - -
      -
    • -

      Swarm collections: A collection has a directory-like structure that holds -Swarm resources. You can create collections in UCP by defining a directory path -and moving resources into it. Also, you can create the path in UCP and use -labels in your YAML file to assign application resources to the path. -Resource types that users can access in a Swarm collection include containers, -networks, nodes, services, secrets, and volumes.

      -
    • -
    • -

      Kubernetes namespaces: A -namespace -is a logical area for a Kubernetes cluster. Kubernetes comes with a default -namespace for your cluster objects, plus two more namespaces for system and -public resources. You can create custom namespaces, but unlike Swarm -collections, namespaces can’t be nested. Resource types that users can -access in a Kubernetes namespace include pods, deployments, network policies, -nodes, services, secrets, and many more.

      -
    • -
    - -

    Together, collections and namespaces are named resource sets. Learn to -group and isolate cluster resources.

    - -

    Grants

    - -

    A grant is made up of subject, role, and resource set.

    - -

    Grants define which users can access what resources in what way. Grants are -effectively Access Control Lists (ACLs), and when grouped together, they -provide comprehensive access policies for an entire organization.

    - -

    Only an administrator can manage grants, subjects, roles, and access to -resources.

    - -
    -

    About administrators

    - -

    An administrator is a user who creates subjects, groups resources by moving them -into collections or namespaces, defines roles by selecting allowable operations, -and applies grants to users and teams.

    -
    - -

    Where to go next

    - - diff --git a/ee/ucp/authorization/_site/isolate-nodes.html b/ee/ucp/authorization/_site/isolate-nodes.html deleted file mode 100644 index c307ebc382..0000000000 --- a/ee/ucp/authorization/_site/isolate-nodes.html +++ /dev/null @@ -1,315 +0,0 @@ -

    With Docker EE Advanced, you can enable physical isolation of resources -by organizing nodes into collections and granting Scheduler access for -different users. To control access to nodes, move them to dedicated collections -where you can grant access to specific users, teams, and organizations.

    - -

    - -

    In this example, a team gets access to a node collection and a resource -collection, and UCP access control ensures that the team members can’t view -or use swarm resources that aren’t in their collection.

    - -

    You need a Docker EE Advanced license and at least two worker nodes to -complete this example.

    - -
      -
    1. Create an Ops team and assign a user to it.
    2. -
    3. Create a /Prod collection for the team’s node.
    4. -
    5. Assign a worker node to the /Prod collection.
    6. -
    7. Grant the Ops teams access to its collection.
    8. -
    - -

    - -

    Create a team

    - -

    In the web UI, navigate to the Organizations & Teams page to create a team -named “Ops” in your organization. Add a user who isn’t a UCP administrator to -the team. -Learn to create and manage teams.

    - -

    Create a node collection and a resource collection

    - -

    In this example, the Ops team uses an assigned group of nodes, which it -accesses through a collection. Also, the team has a separate collection -for its resources.

    - -

    Create two collections: one for the team’s worker nodes and another for the -team’s resources.

    - -
      -
    1. Navigate to the Collections page to view all of the resource -collections in the swarm.
    2. -
    3. Click Create collection and name the new collection “Prod”.
    4. -
    5. Click Create to create the collection.
    6. -
    7. Find Prod in the list, and click View children.
    8. -
    9. Click Create collection, and name the child collection -“Webserver”. This creates a sub-collection for access control.
    10. -
    - -

    You’ve created two new collections. The /Prod collection is for the worker -nodes, and the /Prod/Webserver sub-collection is for access control to -an application that you’ll deploy on the corresponding worker nodes.

    - -

    Move a worker node to a collection

    - -

    By default, worker nodes are located in the /Shared collection. -Worker nodes that are running DTR are assigned to the /System collection. -To control access to the team’s nodes, move them to a dedicated collection.

    - -

    Move a worker node by changing the value of its access label key, -com.docker.ucp.access.label, to a different collection.

    - -
      -
    1. Navigate to the Nodes page to view all of the nodes in the swarm.
    2. -
    3. Click a worker node, and in the details pane, find its Collection. -If it’s in the /System collection, click another worker node, -because you can’t move nodes that are in the /System collection. By -default, worker nodes are assigned to the /Shared collection.
    4. -
    5. When you’ve found an available node, in the details pane, click -Configure.
    6. -
    7. In the Labels section, find com.docker.ucp.access.label and change -its value from /Shared to /Prod.
    8. -
    9. Click Save to move the node to the /Prod collection.
    10. -
    - -
    -

    Docker EE Advanced required

    - -

    If you don’t have a Docker EE Advanced license, you’ll get the following -error message when you try to change the access label: -Nodes must be in either the shared or system collection without an advanced license. -Get a Docker EE Advanced license.

    -
    - -

    - -

    Grant access for a team

    - -

    You need two grants to control access to nodes and container resources:

    - -
      -
    • Grant the Ops team the Restricted Control role for the /Prod/Webserver -resources.
    • -
    • Grant the Ops team the Scheduler role against the nodes in the /Prod -collection.
    • -
    - -

    Create two grants for team access to the two collections:

    - -
      -
    1. Navigate to the Grants page and click Create Grant.
    2. -
    3. In the left pane, click Resource Sets, and in the Swarm collection, -click View Children.
    4. -
    5. In the Prod collection, click View Children.
    6. -
    7. In the Webserver collection, click Select Collection.
    8. -
    9. In the left pane, click Roles, and select Restricted Control -in the dropdown.
    10. -
    11. Click Subjects, and under Select subject type, click Organizations.
    12. -
    13. Select your organization, and in the Team dropdown, select Ops.
    14. -
    15. Click Create to grant the Ops team access to the /Prod/Webserver -collection.
    16. -
    - -

    The same steps apply for the nodes in the /Prod collection.

    - -
      -
    1. Navigate to the Grants page and click Create Grant.
    2. -
    3. In the left pane, click Collections, and in the Swarm collection, -click View Children.
    4. -
    5. In the Prod collection, click Select Collection.
    6. -
    7. In the left pane, click Roles, and in the dropdown, select Scheduler.
    8. -
    9. In the left pane, click Subjects, and under Select subject type, click -Organizations.
    10. -
    11. Select your organization, and in the Team dropdown, select Ops.
    12. -
    13. Click Create to grant the Ops team Scheduler access to the nodes in the -/Prod collection.
    14. -
    - -

    - -

    The cluster is set up for node isolation. Users with access to nodes in the -/Prod collection can deploy Swarm services -and Kubernetes apps, and their workloads -won’t be scheduled on nodes that aren’t in the collection.

    - -

    Deploy a Swarm service as a team member

    - -

    When a user deploys a Swarm service, UCP assigns its resources to the user’s -default collection.

    - -

    From the target collection of a resource, UCP walks up the ancestor collections -until it finds the highest ancestor that the user has Scheduler access to. -Tasks are scheduled on any nodes in the tree below this ancestor. In this example, -UCP assigns the user’s service to the /Prod/Webserver collection and schedules -tasks on nodes in the /Prod collection.

    - -

    As a user on the Ops team, set your default collection to /Prod/Webserver.

    - -
      -
    1. Log in as a user on the Ops team.
    2. -
    3. Navigate to the Collections page, and in the Prod collection, -click View Children.
    4. -
    5. In the Webserver collection, click the More Options icon and -select Set to default.
    6. -
    - -

    Deploy a service automatically to worker nodes in the /Prod collection. -All resources are deployed under the user’s default collection, -/Prod/Webserver, and the containers are scheduled only on the nodes under -/Prod.

    - -
      -
    1. Navigate to the Services page, and click Create Service.
    2. -
    3. Name the service “NGINX”, use the “nginx:latest” image, and click -Create.
    4. -
    5. When the nginx service status is green, click the service. In the -details view, click Inspect Resource, and in the dropdown, select -Containers.
    6. -
    7. -

      Click the NGINX container, and in the details pane, confirm that its -Collection is /Prod/Webserver.

      - -

      -
    8. -
    9. Click Inspect Resource, and in the dropdown, select Nodes.
    10. -
    11. -

      Click the node, and in the details pane, confirm that its Collection -is /Prod.

      - -

      -
    12. -
    - -

    Alternative: Use a grant instead of the default collection

    - -

    Another approach is to use a grant instead of changing the user’s default -collection. An administrator can create a grant for a role that has the -Service Create permission against the /Prod/Webserver collection or a child -collection. In this case, the user sets the value of the service’s access label, -com.docker.ucp.access.label, to the new collection or one of its children -that has a Service Create grant for the user.

    - -

    Deploy a Kubernetes application

    - -

    Starting in Docker Enterprise Edition 2.0, you can deploy a Kubernetes workload -to worker nodes, based on a Kubernetes namespace.

    - -
      -
    1. Convert a node to use the Kubernetes orchestrator.
    2. -
    3. Create a Kubernetes namespace.
    4. -
    5. Create a grant for the namespace.
    6. -
    7. Link the namespace to a node collection.
    8. -
    9. Deploy a Kubernetes workload.
    10. -
    - -

    Convert a node to Kubernetes

    - -

    To deploy Kubernetes workloads, an administrator must convert a worker node to -use the Kubernetes orchestrator. -Learn how to set the orchestrator type -for your nodes in the /Prod collection.

    - -

    Create a Kubernetes namespace

    - -

    An administrator must create a Kubernetes namespace to enable node isolation -for Kubernetes workloads.

    - -
      -
    1. In the left pane, click Kubernetes.
    2. -
    3. Click Create to open the Create Kubernetes Object page.
    4. -
    5. -

      In the Object YAML editor, paste the following YAML.

      - -
      apiVersion: v1
      -kind: Namespace
      -metadata:
      -  name: ops-nodes
      -
      -
    6. -
    7. Click Create to create the ops-nodes namespace.
    8. -
    - -

    Grant access to the Kubernetes namespace

    - -

    Create a grant to the ops-nodes namespace for the Ops team by following the -same steps that you used to grant access to the /Prod collection, only this -time, on the Create Grant page, pick Namespaces, instead of -Collections.

    - -

    - -

    Select the ops-nodes namespace, and create a Full Control grant for the -Ops team.

    - -

    - - - -

    The last step is to link the Kubernetes namespace to the /Prod collection.

    - -
      -
    1. Navigate to the Namespaces page, and find the ops-nodes namespace -in the list.
    2. -
    3. -

      Click the More options icon and select Link nodes in collection.

      - -

      -
    4. -
    5. In the Choose collection section, click View children on the -Swarm collection to navigate to the Prod collection.
    6. -
    7. On the Prod collection, click Select collection.
    8. -
    9. -

      Click Confirm to link the namespace to the collection.

      - -

      -
    10. -
    - -

    Deploy a Kubernetes workload to the node collection

    - -
      -
    1. Log in as a non-admin who’s on the Ops team.
    2. -
    3. In the left pane, open the Kubernetes section.
    4. -
    5. Confirm that ops-nodes is displayed under Namespaces.
    6. -
    7. -

      Click Create, and in the Object YAML editor, paste the following -YAML definition for an NGINX server.

      - -
      apiVersion: v1
      -kind: ReplicationController
      -metadata:
      -  name: nginx
      -spec:
      -  replicas: 1
      -  selector:
      -    app: nginx
      -  template:
      -    metadata:
      -      name: nginx
      -      labels:
      -        app: nginx
      -    spec:
      -      containers:
      -      - name: nginx
      -        image: nginx
      -        ports:
      -        - containerPort: 80
      -
      - -

      -
    8. -
    9. Click Create to deploy the workload.
    10. -
    11. -

      In the left pane, click Pods and confirm that the workload is running -on pods in the ops-nodes namespace.

      - -

      -
    12. -
    - -

    Where to go next

    - - diff --git a/ee/ucp/authorization/_site/isolate-volumes.html b/ee/ucp/authorization/_site/isolate-volumes.html deleted file mode 100644 index ddb22fc23a..0000000000 --- a/ee/ucp/authorization/_site/isolate-volumes.html +++ /dev/null @@ -1,100 +0,0 @@ -

    In this example, two teams are granted access to volumes in two different -resource collections. UCP access control prevents the teams from viewing and -accessing each other’s volumes, even though they may be located in the same -nodes.

    - -
      -
    1. Create two teams.
    2. -
    3. Create two collections, one for each team.
    4. -
    5. Create grants to manage access to the collections.
    6. -
    7. Team members create volumes that are specific to their team.
    8. -
    - -

    - -

    Create two teams

    - -

    Navigate to the Organizations & Teams page to create two teams in the -“engineering” organization, named “Dev” and “Prod”. Add a user who’s not a UCP administrator to the Dev team, and add another non-admin user to the Prod team. Learn how to create and manage teams.

    - -

    - -

    Create resource collections

    - -

    In this example, the Dev and Prod teams use two different volumes, which they -access through two corresponding resource collections. The collections are -placed under the /Shared collection.

    - -
      -
    1. In the left pane, click Collections to show all of the resource -collections in the swarm.
    2. -
    3. Find the /Shared collection and click View children.
    4. -
    5. Click Create collection and name the new collection “dev-volumes”.
    6. -
    7. Click Create to create the collection.
    8. -
    9. Click Create collection again, name the new collection “prod-volumes”, -and click Create.
    10. -
    - -

    - -

    Create grants for controlling access to the new volumes

    - -

    In this example, the Dev team gets access to its volumes from a grant that -associates the team with the /Shared/dev-volumes collection, and the Prod -team gets access to its volumes from another grant that associates the team -with the /Shared/prod-volumes collection.

    - -
      -
    1. Navigate to the Grants page and click Create Grant.
    2. -
    3. In the left pane, click Collections, and in the Swarm collection, -click View Children.
    4. -
    5. In the Shared collection, click View Children.
    6. -
    7. In the list, find /Shared/dev-volumes and click Select Collection.
    8. -
    9. Click Roles, and in the dropdown, select Restricted Control.
    10. -
    11. Click Subjects, and under Select subject type, click Organizations. -In the dropdown, pick the engineering organization, and in the -Team dropdown, select Dev.
    12. -
    13. Click Create to grant permissions to the Dev team.
    14. -
    15. Click Create Grant and repeat the previous steps for the /Shared/prod-volumes -collection and the Prod team.
    16. -
    - -

    - -

    With the collections and grants in place, users can sign in and create volumes -in their assigned collections.

    - -

    Create a volume as a team member

    - -

    Team members have permission to create volumes in their assigned collection.

    - -
      -
    1. Log in as one of the users on the Dev team.
    2. -
    3. Navigate to the Volumes page to view all of the volumes in the -swarm that the user can access.
    4. -
    5. Click Create volume and name the new volume “dev-data”.
    6. -
    7. In the left pane, click Collections. The default collection -appears. At the top of the page, click Shared, find the dev-volumes collection in the list, and click Select Collection.
    8. -
    9. Click Create to add the “dev-data” volume to the collection.
    10. -
    11. Log in as one of the users on the Prod team, and repeat the -previous steps to create a “prod-data” volume assigned to the /Shared/prod-volumes collection.
    12. -
    - -

    - -

    Now you can see role-based access control in action for volumes. The user on -the Prod team can’t see the Dev team’s volumes, and if you log in again as a -user on the Dev team, you won’t see the Prod team’s volumes.

    - -

    - -

    Sign in with a UCP administrator account, and you see all of the volumes -created by the Dev and Prod users.

    - -

    - -

    Where to go next

    - - diff --git a/ee/ucp/authorization/_site/migrate-kubernetes-roles.html b/ee/ucp/authorization/_site/migrate-kubernetes-roles.html deleted file mode 100644 index 80f639d3c8..0000000000 --- a/ee/ucp/authorization/_site/migrate-kubernetes-roles.html +++ /dev/null @@ -1,122 +0,0 @@ -

    With Docker Enterprise Edition, you can create roles and grants -that implement the permissions that are defined in your Kubernetes apps. -Learn about RBAC authorization in Kubernetes.

    - -

    Docker EE has its own implementation of role-based access control, so you -can’t use Kubernetes RBAC objects directly. Instead, you create UCP roles -and grants that correspond with the role objects and bindings in your -Kubernetes app.

    - -
      -
    • Kubernetes Role and ClusterRole objects become UCP roles.
    • -
    • Kubernetes RoleBinding and ClusterRoleBinding objects become UCP grants.
    • -
    - -

    Learn about UCP roles and grants.

    - -
    -

    Kubernetes yaml in UCP

    - -

    Docker EE has its own RBAC system that’s distinct from the Kubernetes -system, so you can’t create any objects that are returned by the -/apis/rbac.authorization.k8s.io endpoints. If the yaml for your Kubernetes -app contains definitions for Role, ClusterRole, RoleBinding or -ClusterRoleBinding objects, UCP returns an error.

    -
    - -

    Migrate a Kubernetes Role to a custom UCP role

    - -

    If you have Role and ClusterRole objects defined in the yaml for your -Kubernetes app, you can realize the same authorization model by creating -custom roles by using the UCP web UI.

    - -

    The following Kubernetes yaml defines a pod-reader role, which gives users -access to the read-only pods resource APIs, get, watch, and list.

    - -
    kind: Role
    -apiVersion: rbac.authorization.k8s.io/v1
    -metadata:
    -  namespace: default
    -  name: pod-reader
    -rules:
    -- apiGroups: [""]
    -  resources: ["pods"]
    -  verbs: ["get", "watch", "list"]
    -
    - -

    Create a corresponding custom role by using the Create Role page in the -UCP web UI.

    - -
      -
    1. Log in to the UCP web UI with an administrator account.
    2. -
    3. Click Roles under User Management.
    4. -
    5. Click Create Role.
    6. -
    7. In the Role Details section, name the role “pod-reader”.
    8. -
    9. In the left pane, click Operations.
    10. -
    11. Scroll to the Kubernetes pod operations section and expand the -All Kubernetes Pod operations dropdown.
    12. -
    13. Select the Pod Get, Pod List, and Pod Watch operations. -
    14. -
    15. Click Create.
    16. -
    - -

    The pod-reader role is ready to use in grants that control access to -cluster resources.

    - -

    Migrate a Kubernetes RoleBinding to a UCP grant

    - -

    If your Kubernetes app defines RoleBinding or ClusterRoleBinding -objects for specific users, create corresponding grants by using the UCP web UI.

    - -

    The following Kubernetes yaml defines a RoleBinding that grants user “jane” -read-only access to pods in the default namespace.

    - -
    kind: RoleBinding
    -apiVersion: rbac.authorization.k8s.io/v1
    -metadata:
    -  name: read-pods
    -  namespace: default
    -subjects:
    -- kind: User
    -  name: jane
    -  apiGroup: rbac.authorization.k8s.io
    -roleRef:
    -  kind: Role
    -  name: pod-reader
    -  apiGroup: rbac.authorization.k8s.io
    -
    - -

    Create a corresponding grant by using the Create Grant page in the -UCP web UI.

    - -
      -
    1. Create a non-admin user named “jane”. Learn to create users and teams.
    2. -
    3. Click Grants under User Management.
    4. -
    5. Click Create Grant.
    6. -
    7. In the Type section, click Namespaces and ensure that default is selected.
    8. -
    9. In the left pane, click Roles, and in the Role dropdown, select pod-reader.
    10. -
    11. In the left pane, click Subjects, and click All Users.
    12. -
    13. In the User dropdown, select jane.
    14. -
    15. Click Create.
    16. -
    - -

    - -

    User “jane” has access to inspect pods in the default namespace.

    - -

    Kubernetes limitations

    - -

    There are a few limitations that you should be aware of when creating -Kubernetes workloads:

    - -
      -
    • Docker EE has its own RBAC system, so it’s not possible to create -ClusterRole objects, ClusterRoleBinding objects, or any other object that is -created by using the /apis/rbac.authorization.k8s.io endpoints.
    • -
    • To make sure your cluster is secure, only users and service accounts that have been -granted “Full Control” of all Kubernetes namespaces can deploy pods with privileged -options. This includes: PodSpec.hostIPC, PodSpec.hostNetwork, -PodSpec.hostPID, SecurityContext.allowPrivilegeEscalation, -SecurityContext.capabilities, SecurityContext.privileged, and -Volume.hostPath.
    • -
    diff --git a/ee/ucp/authorization/_site/pull-images.html b/ee/ucp/authorization/_site/pull-images.html deleted file mode 100644 index af82bb922a..0000000000 --- a/ee/ucp/authorization/_site/pull-images.html +++ /dev/null @@ -1,30 +0,0 @@ -

    By default only admin users can pull images into a cluster managed by UCP.

    - -

    Images are a shared resource, as such they are always in the swarm collection. -To allow users access to pull images, you need to grant them the image load -permission for the swarm collection.

    - -

    As an admin user, go to the UCP web UI, navigate to the Roles page, -and create a new role named Pull images.

    - -

    - -

    Then go to the Grants page, and create a new grant with:

    - -
      -
    • Subject: the user you want to be able to pull images.
    • -
    • Roles: the “Pull images” role you created.
    • -
    • Resource set: the swarm collection.
    • -
    - -

    - -

    Once you click Create the user is able to pull images from the UCP web UI -or the CLI.

    - -

    Where to go next

    - - diff --git a/ee/ucp/authorization/_site/reset-user-password.html b/ee/ucp/authorization/_site/reset-user-password.html deleted file mode 100644 index 68cf08e3ae..0000000000 --- a/ee/ucp/authorization/_site/reset-user-password.html +++ /dev/null @@ -1,24 +0,0 @@ -

    Docker EE administrators can reset user passwords managed in UCP:

    - -
      -
    1. Log in to UCP with administrator credentials.
    2. -
    3. Click Users under User Management.
    4. -
    5. Select the user whose password you want to change.
    6. -
    7. Select Configure and select Security.
    8. -
    9. Enter the new password, confirm, and click Update Password.
    10. -
    - -

    Users passwords managed with an LDAP service must be changed on the LDAP server.

    - -

    - -

    Change administrator passwords

    - -

    Administrators who need a password change can ask another administrator for help -or use ssh to log in to a manager node managed by Docker EE and run:

    - -
    
    -docker run --net=host -v ucp-auth-api-certs:/tls -it "$(docker inspect --format '{{ .Spec.TaskTemplate.ContainerSpec.Image }}' ucp-auth-api)" "$(docker inspect --format '{{ index .Spec.TaskTemplate.ContainerSpec.Args 0 }}' ucp-auth-api)" passwd -i
    -
    -
    - diff --git a/ee/ucp/authorization/grant-permissions.md b/ee/ucp/authorization/grant-permissions.md index 85473d7a22..2797ccdfd1 100644 --- a/ee/ucp/authorization/grant-permissions.md +++ b/ee/ucp/authorization/grant-permissions.md @@ -31,10 +31,10 @@ With Kubernetes orchestration, a grant is made up of *subject*, *role*, and > namespace. {: .important} -To create a Kubernetes grant in UCP: +To create a Kubernetes grant (role binding) in UCP: -1. Click **Grants** under **User Management**. -2. Click **Create Grant**. +1. Click **Grants** under **Access Control**. +2. Click **Create Role Binding**. 3. Click **Namespaces** under **Kubernetes**. 4. Find the desired namespace and click **Select Namespace**. 5. On the **Roles** tab, select a role. @@ -55,13 +55,14 @@ With Swarm orchestration, a grant is made up of *subject*, *role*, and To create a grant in UCP: -1. Click **Grants** under **User Management**. -2. Click **Create Grant**. -3. On the Collections tab, click **Collections** (for Swarm). -4. Click **View Children** until you get to the desired collection and **Select**. -5. On the **Roles** tab, select a role. -6. On the **Subjects** tab, select a user, team, or organization to authorize. -7. Click **Create**. +1. Click **Grants** under **Access Control**. +2. Click **Swarm** +3. Click **Create Grant**. +4. In the **Select Subject Type** section, select **Users** or **Organizations**. +5. Click **View Children** until you get to the desired collection and **Select**. +6. On the **Roles** tab, select a role. +7. On the **Subjects** tab, select a user, team, or organization to authorize. +8. Click **Create**. > By default, all new users are placed in the `docker-datacenter` organization. 
> To apply permissions to all Docker EE users, create a grant with the diff --git a/ee/ucp/authorization/group-resources.md b/ee/ucp/authorization/group-resources.md index 67f3fdbad4..01722b334f 100644 --- a/ee/ucp/authorization/group-resources.md +++ b/ee/ucp/authorization/group-resources.md @@ -24,7 +24,7 @@ and resource quotas for the namespace. Each Kubernetes resources can only be in one namespace, and namespaces cannot be nested inside one another. -[Learn more about Kubernetes namespaces](https://v1-8.docs.kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/). +[Learn more about Kubernetes namespaces](https://v1-11.docs.kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/). ## Swarm collections diff --git a/ee/ucp/authorization/index.md b/ee/ucp/authorization/index.md index aaacb86ef6..eb239040dc 100644 --- a/ee/ucp/authorization/index.md +++ b/ee/ucp/authorization/index.md @@ -67,7 +67,7 @@ To control user access, cluster resources are grouped into Docker Swarm networks, nodes, services, secrets, and volumes. - **Kubernetes namespaces**: A -[namespace](https://v1-8.docs.kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) +[namespace](https://v1-11.docs.kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) is a logical area for a Kubernetes cluster. Kubernetes comes with a `default` namespace for your cluster objects, plus two more namespaces for system and public resources. You can create custom namespaces, but unlike Swarm diff --git a/ee/ucp/authorization/migrate-kubernetes-roles.md b/ee/ucp/authorization/migrate-kubernetes-roles.md deleted file mode 100644 index ec2e861983..0000000000 --- a/ee/ucp/authorization/migrate-kubernetes-roles.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -title: Migrate Kubernetes roles to Docker EE authorization -description: Learn how to transfer Kubernetes Role and RoleBinding objects to UCP roles and grants. 
-keywords: authorization, authentication, authorize, authenticate, user, team, UCP, Kubernetes, role, grant ---- - -With Docker Enterprise Edition, you can create roles and grants -that implement the permissions that are defined in your Kubernetes apps. -Learn about [RBAC authorization in Kubernetes](https://v1-8.docs.kubernetes.io/docs/admin/authorization/rbac/). - -Docker EE has its own implementation of role-based access control, so you -can't use Kubernetes RBAC objects directly. Instead, you create UCP roles -and grants that correspond with the role objects and bindings in your -Kubernetes app. - -- Kubernetes `Role` and `ClusterRole` objects become UCP roles. -- Kubernetes `RoleBinding` and `ClusterRoleBinding` objects become UCP grants. - -Learn about [UCP roles and grants](grant-permissions.md). - -> Kubernetes yaml in UCP -> -> Docker EE has its own RBAC system that's distinct from the Kubernetes -> system, so you can't create any objects that are returned by the -> `/apis/rbac.authorization.k8s.io` endpoints. If the yaml for your Kubernetes -> app contains definitions for `Role`, `ClusterRole`, `RoleBinding` or -> `ClusterRoleBinding` objects, UCP returns an error. -{: .important} - -## Migrate a Kubernetes Role to a custom UCP role - -If you have `Role` and `ClusterRole` objects defined in the yaml for your -Kubernetes app, you can realize the same authorization model by creating -custom roles by using the UCP web UI. - -The following Kubernetes yaml defines a `pod-reader` role, which gives users -access to the read-only `pods` resource APIs, `get`, `watch`, and `list`. - -```yaml -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: default - name: pod-reader -rules: -- apiGroups: [""] - resources: ["pods"] - verbs: ["get", "watch", "list"] -``` - -Create a corresponding custom role by using the **Create Role** page in the -UCP web UI. - -1. Log in to the UCP web UI with an administrator account. -2. 
Click **Roles** under **User Management**. -3. Click **Create Role**. -4. In the **Role Details** section, name the role "pod-reader". -5. In the left pane, click **Operations**. -6. Scroll to the **Kubernetes pod operations** section and expand the - **All Kubernetes Pod operations** dropdown. -7. Select the **Pod Get**, **Pod List**, and **Pod Watch** operations. - ![](../images/migrate-kubernetes-roles-1.png){: .with-border} -8. Click **Create**. - -The `pod-reader` role is ready to use in grants that control access to -cluster resources. - -## Migrate a Kubernetes RoleBinding to a UCP grant - -If your Kubernetes app defines `RoleBinding` or `ClusterRoleBinding` -objects for specific users, create corresponding grants by using the UCP web UI. - -The following Kubernetes yaml defines a `RoleBinding` that grants user "jane" -read-only access to pods in the `default` namespace. - -```yaml -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: read-pods - namespace: default -subjects: -- kind: User - name: jane - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: Role - name: pod-reader - apiGroup: rbac.authorization.k8s.io -``` - -Create a corresponding grant by using the **Create Grant** page in the -UCP web UI. - -1. Create a non-admin user named "jane". [Learn to create users and teams](create-users-and-teams-manually.md). -1. Click **Grants** under **User Management**. -2. Click **Create Grant**. -3. In the **Type** section, click **Namespaces** and ensure that **default** is selected. -4. In the left pane, click **Roles**, and in the **Role** dropdown, select **pod-reader**. -5. In the left pane, click **Subjects**, and click **All Users**. -6. In the **User** dropdown, select **jane**. -7. Click **Create**. - -![](../images/migrate-kubernetes-roles-2.png){: .with-border} - -User "jane" has access to inspect pods in the `default` namespace. 
- -## Kubernetes limitations - -There are a few limitations that you should be aware of when creating -Kubernetes workloads: - -* Docker EE has its own RBAC system, so it's not possible to create - `ClusterRole` objects, `ClusterRoleBinding` objects, or any other object that is - created by using the `/apis/rbac.authorization.k8s.io` endpoints. -* To make sure your cluster is secure, only users and service accounts that have been - granted "Full Control" of all Kubernetes namespaces can deploy pods with privileged - options. This includes: `PodSpec.hostIPC`, `PodSpec.hostNetwork`, - `PodSpec.hostPID`, `SecurityContext.allowPrivilegeEscalation`, - `SecurityContext.capabilities`, `SecurityContext.privileged`, and - `Volume.hostPath`. diff --git a/ee/ucp/authorization/reset-user-password.md b/ee/ucp/authorization/reset-user-password.md index e9932393a8..92f6116ccc 100644 --- a/ee/ucp/authorization/reset-user-password.md +++ b/ee/ucp/authorization/reset-user-password.md @@ -4,28 +4,42 @@ description: Learn how to recover your Docker Enterprise Edition credentials. keywords: ucp, authentication, password --- +## Change user passwords + +### Managed in UCP + Docker EE administrators can reset user passwords managed in UCP: 1. Log in to UCP with administrator credentials. -2. Click **Users** under **User Management**. +2. Navigate to **Access Control > Users**. 3. Select the user whose password you want to change. -4. Select **Configure** and select **Security**. + ![](../images/recover-a-user-password-1.png){: .with-border} +4. Click **Edit**. Once on the "Update User" view, select **Security** from the left navigation. 5. Enter the new password, confirm, and click **Update Password**. -Users passwords managed with an LDAP service must be changed on the LDAP server. 
+![](../images/recover-a-user-password-2.png){: .with-border} -![](../images/recover-a-user-password-1.png){: .with-border} +### Managed through LDAP + +User passwords managed with an LDAP service must be changed on the LDAP server. ## Change administrator passwords -Administrators who need a password change can ask another administrator for help -or use **ssh** to log in to a manager node managed by Docker EE and run: +Administrators who need to update their passwords can ask another administrator for help +or SSH into a Docker Enterprise [manager node](/engine/swarm/how-swarm-mode-works/nodes/#manager-nodes) and run: -```none {% raw %} +```bash docker run --net=host -v ucp-auth-api-certs:/tls -it "$(docker inspect --format '{{ .Spec.TaskTemplate.ContainerSpec.Image }}' ucp-auth-api)" "$(docker inspect --format '{{ index .Spec.TaskTemplate.ContainerSpec.Args 0 }}' ucp-auth-api)" passwd -i -{% endraw %} ``` +{% endraw %} +### With DEBUG Global Log Level +If you have DEBUG set as your global log level within UCP, running `$(docker inspect --format '{{ index .Spec.TaskTemplate.ContainerSpec.Args 0 }}` returns `--debug` instead of `--db-addr`. Pass `Args 1` to `$docker inspect` instead to reset your admin password. 
+{% raw %} +```bash +docker run --net=host -v ucp-auth-api-certs:/tls -it "$(docker inspect --format '{{ .Spec.TaskTemplate.ContainerSpec.Image }}' ucp-auth-api)" "$(docker inspect --format '{{ index .Spec.TaskTemplate.ContainerSpec.Args 1 }}' ucp-auth-api)" passwd -i +``` +{% endraw %} diff --git a/ee/ucp/images/deploy-compose-kubernetes-0.png b/ee/ucp/images/deploy-compose-kubernetes-0.png new file mode 100644 index 0000000000..e1ff75d54e Binary files /dev/null and b/ee/ucp/images/deploy-compose-kubernetes-0.png differ diff --git a/ee/ucp/images/deploy-compose-kubernetes-2.png b/ee/ucp/images/deploy-compose-kubernetes-2.png index 18454e3b28..bac01f3f0e 100644 Binary files a/ee/ucp/images/deploy-compose-kubernetes-2.png and b/ee/ucp/images/deploy-compose-kubernetes-2.png differ diff --git a/ee/ucp/images/deploy-compose-kubernetes-3.png b/ee/ucp/images/deploy-compose-kubernetes-3.png index dfc731d7ed..2275818b03 100644 Binary files a/ee/ucp/images/deploy-compose-kubernetes-3.png and b/ee/ucp/images/deploy-compose-kubernetes-3.png differ diff --git a/ee/ucp/images/ingress-deploy.png b/ee/ucp/images/ingress-deploy.png new file mode 100644 index 0000000000..cadbf33928 Binary files /dev/null and b/ee/ucp/images/ingress-deploy.png differ diff --git a/ee/ucp/images/interlock_service_clusters.png~HEAD b/ee/ucp/images/interlock_service_clusters.png~HEAD new file mode 100644 index 0000000000..84ad5f1898 Binary files /dev/null and b/ee/ucp/images/interlock_service_clusters.png~HEAD differ diff --git a/ee/ucp/images/interlock_service_clusters.png~Raw content addition b/ee/ucp/images/interlock_service_clusters.png~Raw content addition new file mode 100644 index 0000000000..84ad5f1898 Binary files /dev/null and b/ee/ucp/images/interlock_service_clusters.png~Raw content addition differ diff --git a/ee/ucp/images/kubernetes-version.png b/ee/ucp/images/kubernetes-version.png index 60a248e849..eaf80406fd 100644 Binary files a/ee/ucp/images/kubernetes-version.png and 
b/ee/ucp/images/kubernetes-version.png differ diff --git a/ee/ucp/images/recover-a-user-password-1.png b/ee/ucp/images/recover-a-user-password-1.png index 4b7ac8b950..93f353374e 100644 Binary files a/ee/ucp/images/recover-a-user-password-1.png and b/ee/ucp/images/recover-a-user-password-1.png differ diff --git a/ee/ucp/images/recover-a-user-password-2.png b/ee/ucp/images/recover-a-user-password-2.png new file mode 100644 index 0000000000..dd6564a8cd Binary files /dev/null and b/ee/ucp/images/recover-a-user-password-2.png differ diff --git a/ee/ucp/images/saml_okta_1.png b/ee/ucp/images/saml_okta_1.png new file mode 100644 index 0000000000..b1ed8a0731 Binary files /dev/null and b/ee/ucp/images/saml_okta_1.png differ diff --git a/ee/ucp/images/saml_okta_2.png b/ee/ucp/images/saml_okta_2.png new file mode 100644 index 0000000000..5a62445c16 Binary files /dev/null and b/ee/ucp/images/saml_okta_2.png differ diff --git a/ee/ucp/images/saml_okta_3.png b/ee/ucp/images/saml_okta_3.png new file mode 100644 index 0000000000..92a49bc4bf Binary files /dev/null and b/ee/ucp/images/saml_okta_3.png differ diff --git a/ee/ucp/images/saml_okta_4.png b/ee/ucp/images/saml_okta_4.png new file mode 100644 index 0000000000..497d982426 Binary files /dev/null and b/ee/ucp/images/saml_okta_4.png differ diff --git a/ee/ucp/images/troubleshoot-with-logs-1.png b/ee/ucp/images/troubleshoot-with-logs-1.png index 136e702b73..ba9b785a4f 100644 Binary files a/ee/ucp/images/troubleshoot-with-logs-1.png and b/ee/ucp/images/troubleshoot-with-logs-1.png differ diff --git a/ee/ucp/images/troubleshoot-with-logs-2.png b/ee/ucp/images/troubleshoot-with-logs-2.png index bb1af8fc70..f19020231a 100644 Binary files a/ee/ucp/images/troubleshoot-with-logs-2.png and b/ee/ucp/images/troubleshoot-with-logs-2.png differ diff --git a/ee/ucp/images/troubleshoot-with-logs-3.png b/ee/ucp/images/troubleshoot-with-logs-3.png new file mode 100644 index 0000000000..df2fed856c Binary files /dev/null and 
b/ee/ucp/images/troubleshoot-with-logs-3.png differ diff --git a/ee/ucp/index.md b/ee/ucp/index.md index ac171fe4ff..e8bc8a4625 100644 --- a/ee/ucp/index.md +++ b/ee/ucp/index.md @@ -5,7 +5,6 @@ description: | keywords: ucp, overview, orchestration, cluster redirect_from: - /ucp/ - - /datacenter/ucp/3.0/guides/ --- Docker Universal Control Plane (UCP) is the enterprise-grade cluster management diff --git a/ee/ucp/interlock/architecture.md b/ee/ucp/interlock/architecture.md index c618741bc8..4a4d6c4022 100644 --- a/ee/ucp/interlock/architecture.md +++ b/ee/ucp/interlock/architecture.md @@ -2,7 +2,7 @@ title: Interlock architecture description: Learn more about the architecture of the layer 7 routing solution for Docker swarm services. -keywords: routing, proxy +keywords: routing, UCP, interlock, load balancing --- This document covers the following considerations: @@ -114,4 +114,8 @@ The following features are supported in VIP mode: ## Next steps - [Deploy Interlock](deploy/index.md) +<<<<<<< HEAD +- [Configure Interlock](config/index.md) +======= - [Configure Interlock[(config/index.md) +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 diff --git a/ee/ucp/interlock/config/host-mode-networking.md b/ee/ucp/interlock/config/host-mode-networking.md index 09cad3147a..262cd334a2 100644 --- a/ee/ucp/interlock/config/host-mode-networking.md +++ b/ee/ucp/interlock/config/host-mode-networking.md @@ -1,14 +1,34 @@ --- +<<<<<<< HEAD +<<<<<<< HEAD +title: Configure host mode networking +description: Learn how to configure the UCP layer 7 routing solution with + host mode networking. +keywords: routing, proxy, interlock, load balancing +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 title: Host mode networking description: Learn how to configure the UCP layer 7 routing solution with host mode networking. 
keywords: routing, proxy +<<<<<<< HEAD +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 redirect_from: - /ee/ucp/interlock/usage/host-mode-networking/ - /ee/ucp/interlock/deploy/host-mode-networking/ --- +<<<<<<< HEAD +<<<<<<< HEAD +======= # Configuring host mode networking +>>>>>>> Raw content addition +======= +# Configuring host mode networking +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 By default, layer 7 routing components communicate with one another using overlay networks, but Interlock supports host mode networking in a variety of ways, including proxy only, Interlock only, application only, and hybrid. @@ -27,14 +47,30 @@ To use host mode networking instead of overlay networking: ## Configuration for a production-grade deployment If you have not done so, configure the +<<<<<<< HEAD +<<<<<<< HEAD +[layer 7 routing solution for production](../deploy/production.md). +======= [layer 7 routing solution for production](production.md). +>>>>>>> Raw content addition +======= +[layer 7 routing solution for production](production.md). +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 The `ucp-interlock-proxy` service replicas should then be running on their own dedicated nodes. ## Update the ucp-interlock config +<<<<<<< HEAD +<<<<<<< HEAD +[Update the ucp-interlock service configuration](./index.md) so that it uses +======= [Update the ucp-interlock service configuration](configure.md) so that it uses +>>>>>>> Raw content addition +======= +[Update the ucp-interlock service configuration](configure.md) so that it uses +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 host mode networking. Update the `PublishMode` key to: @@ -92,6 +128,18 @@ service is running. 
If everything is working correctly, you should get a JSON result like: +<<<<<<< HEAD +<<<<<<< HEAD +{% raw %} +```json +{"instance":"63b855978452", "version":"0.1", "request_id":"d641430be9496937f2669ce6963b67d6"} +``` +{% endraw %} + +The following example describes how to configure an eight (8) node Swarm cluster that uses host mode +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 ```json {"instance":"63b855978452", "version":"0.1", "request_id":"d641430be9496937f2669ce6963b67d6"} ``` @@ -102,20 +150,46 @@ If everything is working correctly, you should get a JSON result like: In this example we will configure an eight (8) node Swarm cluster that uses host mode +<<<<<<< HEAD +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 networking to route traffic without using overlay networks. There are three (3) managers and five (5) workers. Two of the workers are configured with node labels to be dedicated ingress cluster load balancer nodes. These will receive all application traffic. +<<<<<<< HEAD +<<<<<<< HEAD +This example does not cover the actual deployment of infrastructure. +======= This example will not cover the actual deployment of infrastructure. +>>>>>>> Raw content addition +======= +This example will not cover the actual deployment of infrastructure. +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 It assumes you have a vanilla Swarm cluster (`docker init` and `docker swarm join` from the nodes). See the [Swarm](https://docs.docker.com/engine/swarm/) documentation if you need help getting a Swarm cluster deployed. +<<<<<<< HEAD +<<<<<<< HEAD +Note: When using host mode networking, you cannot use the DNS service discovery because that +requires overlay networking. You can use other tooling such as [Registrator](https://github.com/gliderlabs/registrator) +that will give you that functionality if needed. 
+ +Configure the load balancer worker nodes (`lb-00` and `lb-01`) with node labels in order to pin the Interlock Proxy +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 Note: when using host mode networking you will not be able to use the DNS service discovery as that requires overlay networking. You can use other tooling such as [Registrator](https://github.com/gliderlabs/registrator) that will give you that functionality if needed. We will configure the load balancer worker nodes (`lb-00` and `lb-01`) with node labels in order to pin the Interlock Proxy +<<<<<<< HEAD +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 service. Once you are logged into one of the Swarm managers run the following to add node labels to the dedicated load balancer worker nodes: @@ -128,12 +202,26 @@ lb-01 Inspect each node to ensure the labels were successfully added: +<<<<<<< HEAD +<<<<<<< HEAD +{% raw %} +======= +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 ```bash $> docker node inspect -f '{{ .Spec.Labels }}' lb-00 map[nodetype:loadbalancer] $> docker node inspect -f '{{ .Spec.Labels }}' lb-01 map[nodetype:loadbalancer] ``` +<<<<<<< HEAD +<<<<<<< HEAD +{% endraw %} +======= +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 Next, create a configuration object for Interlock that specifies host mode networking: @@ -145,10 +233,23 @@ PollInterval = "3s" [Extensions] [Extensions.default] +<<<<<<< HEAD +<<<<<<< HEAD + Image = "{{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version }}" + Args = [] + ServiceName = "interlock-ext" + ProxyImage = "{{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }}" +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 Image = "interlockpreview/interlock-extension-nginx:2.0.0-preview" Args = [] ServiceName = "interlock-ext" ProxyImage = "nginx:alpine" +<<<<<<< HEAD +>>>>>>> Raw content 
addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 ProxyArgs = [] ProxyServiceName = "interlock-proxy" ProxyConfigPath = "/etc/nginx/nginx.conf" @@ -170,7 +271,15 @@ oqkvv1asncf6p2axhx41vylgt Note the `PublishMode = "host"` setting. This instructs Interlock to configure the proxy service for host mode networking. +<<<<<<< HEAD +<<<<<<< HEAD +Now create the Interlock service also using host mode networking: +======= Now we can create the Interlock service also using host mode networking: +>>>>>>> Raw content addition +======= +Now we can create the Interlock service also using host mode networking: +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 ```bash $> docker service create \ @@ -179,11 +288,25 @@ $> docker service create \ --constraint node.role==manager \ --publish mode=host,target=8080 \ --config src=service.interlock.conf,target=/config.toml \ +<<<<<<< HEAD +<<<<<<< HEAD + {{ page.ucp_org }}/ucp-interlock:{{ page.ucp_version }} -D run -c /config.toml +sjpgq7h621exno6svdnsvpv9z +``` + +## Configure proxy services +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 interlockpreview/interlock:2.0.0-preview -D run -c /config.toml sjpgq7h621exno6svdnsvpv9z ``` ## Configure Proxy Services +<<<<<<< HEAD +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 With the node labels, you can re-configure the Interlock Proxy services to be constrained to the workers. From a manager run the following to pin the proxy services to the load balancer worker nodes: diff --git a/ee/ucp/interlock/config/index.md b/ee/ucp/interlock/config/index.md index f31a72e1fd..93487e9ca4 100644 --- a/ee/ucp/interlock/config/index.md +++ b/ee/ucp/interlock/config/index.md @@ -1,13 +1,33 @@ --- +<<<<<<< HEAD +<<<<<<< HEAD +title: Configure layer 7 routing service +description: Learn how to configure the layer 7 routing solution for UCP. 
+keywords: routing, proxy, interlock, load balancing +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 title: Configuring layer 7 routing service description: Learn how to configure the layer 7 routing solution for UCP, that allows you to route traffic to swarm services. keywords: routing, proxy +<<<<<<< HEAD +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 redirect_from: - /ee/ucp/interlock/deploy/configure/ - /ee/ucp/interlock/usage/default-service/ --- +<<<<<<< HEAD +<<<<<<< HEAD +To further customize the layer 7 routing solution, you must update the +`ucp-interlock` service with a new Docker configuration. + +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 # Configuring layer 7 routing services You can configure ports for incoming traffic from the UCP web UI. @@ -16,6 +36,10 @@ To further customize the layer 7 routing solution, you must update the Here's how it works: +<<<<<<< HEAD +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 1. Find out what configuration is currently being used for the `ucp-interlock` service and save it to a file: @@ -91,6 +115,26 @@ The core configuration handles the Interlock service itself. These are the config
The following options are available to configure the extensions: +<<<<<<< HEAD +<<<<<<< HEAD +| Option | Type | Description | +|:-------------------|:------------|:-----------------------------------------------------------| +| `Image` | string | Name of the Docker Image to use for the extension service | +| `Args` | []string | Arguments to be passed to the Docker extension service upon creation | +| `Labels` | map[string]string | Labels to add to the extension service | +| `ContainerLabels` | map[string]string | labels to be added to the extension service tasks | +| `Constraints` | []string | one or more [constraints](https://docs.docker.com/engine/reference/commandline/service_create/#specify-service-constraints-constraint) to use when scheduling the extension service | +| `PlacementPreferences` | []string | one or more [placement prefs](https://docs.docker.com/engine/reference/commandline/service_create/#specify-service-placement-preferences-placement-pref) to use when scheduling the extension service | +| `ServiceName` | string | Name of the extension service | +| `ProxyImage` | string | Name of the Docker Image to use for the proxy service | +| `ProxyArgs` | []string | Arguments to be passed to the Docker proxy service upon creation | +| `ProxyLabels` | map[string]string | Labels to add to the proxy service | +| `ProxyContainerLabels` | map[string]string | labels to be added to the proxy service tasks | +| `ProxyServiceName` | string | Name of the proxy service | +| `ProxyConfigPath` | string | Path in the service for the generated proxy config | +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 | Option | Type | Description | |:-------------------|:------------------|:------------------------------------------------------------------------------| | `Image` | string | Name of the Docker image to use for the extension service. | @@ -126,17 +170,37 @@ Interlock must contain at least one extension to service traffic. 
The following | `ProxyContainerLabels` | map[string]string | labels to be added to the proxy service tasks | | `ProxyServiceName` | string | name of the proxy service | | `ProxyConfigPath` | string | path in the service for the generated proxy config | +<<<<<<< HEAD +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 | `ProxyReplicas` | uint | number of proxy service replicas | | `ProxyStopSignal` | string | stop signal for the proxy service (i.e. `SIGQUIT`) | | `ProxyStopGracePeriod` | string | stop grace period for the proxy service (i.e. `5s`) | | `ProxyConstraints` | []string | one or more [constraints](https://docs.docker.com/engine/reference/commandline/service_create/#specify-service-constraints-constraint) to use when scheduling the proxy service | | `ProxyPlacementPreferences` | []string | one or more [placement prefs](https://docs.docker.com/engine/reference/commandline/service_create/#specify-service-placement-preferences-placement-pref) to use when scheduling the proxy service | +<<<<<<< HEAD +<<<<<<< HEAD +| `ProxyUpdateDelay` | string | delay between rolling proxy container updates | +| `ServiceCluster` | string | Name of the cluster this extension services | +| `PublishMode` | string (`ingress` or `host`) | Publish mode that the proxy service uses | +| `PublishedPort` | int | Port on which the proxy service serves non-SSL traffic | +| `PublishedSSLPort` | int | Port on which the proxy service serves SSL traffic | +| `Template` | string | Docker configuration object that is used as the extension template | +| `Config` | Config | Proxy configuration used by the extensions as described in the following table | +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 | `ServiceCluster` | string | name of the cluster this extension services | | `PublishMode` | string (`ingress` or `host`) | publish mode that the proxy service uses | | `PublishedPort` | int | port that the proxy service serves non-SSL traffic 
| | `PublishedSSLPort` | int | port that the proxy service serves SSL traffic | | `Template` | string | Docker config object that is used as the extension template | | `Config` | Config | proxy configuration used by the extensions as listed below | +<<<<<<< HEAD +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 ### Proxy Options are made available to the extensions, and the extensions utilize the options needed for proxy service configuration. This provides overrides to the extension configuration. @@ -147,7 +211,15 @@ different configuration options available. Refer to the documentation for each - [Nginx](nginx-config.md) - [HAproxy](haproxy-config.md) +<<<<<<< HEAD +<<<<<<< HEAD +#### Customize the default proxy service +======= #### Customizing the default proxy service +>>>>>>> Raw content addition +======= +#### Customizing the default proxy service +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 The default proxy service used by UCP to provide layer 7 routing is NGINX. 
If users try to access a route that hasn't been configured, they will see the default NGINX 404 page: ![Default NGINX page](../../images/interlock-default-service-1.png){: .with-border} @@ -198,10 +270,23 @@ DockerURL = "unix:///var/run/docker.sock" PollInterval = "3s" [Extensions.default] +<<<<<<< HEAD +<<<<<<< HEAD + Image = "{{ page.ucp_org }}/interlock-extension-nginx:{{ page.ucp_version }}" + Args = ["-D"] + ServiceName = "interlock-ext" + ProxyImage = "{{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }}" +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 Image = "docker/interlock-extension-nginx:latest" Args = ["-D"] ServiceName = "interlock-ext" ProxyImage = "nginx:alpine" +<<<<<<< HEAD +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 ProxyArgs = [] ProxyServiceName = "interlock-proxy" ProxyConfigPath = "/etc/nginx/nginx.conf" @@ -222,6 +307,16 @@ PollInterval = "3s" ## Next steps +<<<<<<< HEAD +<<<<<<< HEAD +- [Configure host mode networking](host-mode-networking.md) +- [Configure an nginx extension](nginx-config.md) +- [Use application service labels](service-labels.md) +- [Tune the proxy service](tuning.md) +- [Update Interlock services](updates.md) +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 - [Using a custom extension template](custom-template.md) - [Configuring an HAProxy extension](haproxy-config.md) - [Configuring host mode networking](host-mode-networking.md) @@ -229,3 +324,7 @@ PollInterval = "3s" - [Using application service labels](service-labels.md) - [Tuning the proxy service](tuning.md) - [Updating Interlock services](updates.md) +<<<<<<< HEAD +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 diff --git a/ee/ucp/interlock/config/nginx-config.md b/ee/ucp/interlock/config/nginx-config.md index e9315639b2..968e479d63 100644 --- a/ee/ucp/interlock/config/nginx-config.md +++ b/ee/ucp/interlock/config/nginx-config.md @@ 
-1,4 +1,34 @@ --- +<<<<<<< HEAD +<<<<<<< HEAD +title: Configure Nginx +description: Learn how to configure an nginx extension +keywords: routing, proxy, interlock, load balancing +--- + +By default, nginx is used as a proxy, so the following configuration options are +available for the nginx extension: + +| Option | Type | Description . | Defaults | +|:------ |:------ |:------ |:------ | +| `User` | string | User to be used in the proxy | `nginx` | +| `PidPath` | string | Path to the pid file for the proxy service | `/var/run/proxy.pid` | +| `MaxConnections` | int | Maximum number of connections for proxy service | `1024` | +| `ConnectTimeout` | int | Timeout in seconds for clients to connect | `600` | +| `SendTimeout` | int | Timeout in seconds for the service to send a request to the proxied upstream | `600` | +| `ReadTimeout` | int | Timeout in seconds for the service to read a response from the proxied upstream | `600` | +| `SSLOpts` | string | Options to be passed when configuring SSL | | +| `SSLDefaultDHParam` | int | Size of DH parameters | `1024` | +| `SSLDefaultDHParamPath` | string | Path to DH parameters file | | +| `SSLVerify` | string | SSL client verification | `required` | +| `WorkerProcesses` | string | Number of worker processes for the proxy service | `1` | +| `RLimitNoFile` | int | Number of maxiumum open files for the proxy service | `65535` | +| `SSLCiphers` | string | SSL ciphers to use for the proxy service | `HIGH:!aNULL:!MD5` | +| `SSLProtocols` | string | Enable the specified TLS protocols | `TLSv1.2` | +| `HideInfoHeaders` | bool | Hide proxy-related response headers. | +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 title: Nginx configuration description: Learn how to configure an nginx extension keywords: routing, proxy @@ -49,6 +79,10 @@ This is from Interlock docs - which is correct??????? 
| `RLimitNoFile` | int | number of maximum open files for the proxy service | `65535` | | `SSLCiphers` | string | SSL ciphers to use for the proxy service | `HIGH:!aNULL:!MD5` | | `SSLProtocols` | string | enable the specified TLS protocols | `TLSv1.2` | +<<<<<<< HEAD +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 | `KeepaliveTimeout` | string | connection keepalive timeout | `75s` | | `ClientMaxBodySize` | string | maximum allowed size of the client request body | `1m` | | `ClientBodyBufferSize` | string | sets buffer size for reading client request body | `8k` | @@ -66,4 +100,11 @@ This is from Interlock docs - which is correct??????? | `MainLogFormat` | string | [Format](http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format) to use for main logger | see default format | | `TraceLogFormat` | string | [Format](http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format) to use for trace logger | see default format | +<<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> Raw content addition +======= + +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 diff --git a/ee/ucp/interlock/config/service-labels.md b/ee/ucp/interlock/config/service-labels.md index e7aa26206b..216db3d622 100644 --- a/ee/ucp/interlock/config/service-labels.md +++ b/ee/ucp/interlock/config/service-labels.md @@ -1,4 +1,14 @@ --- +<<<<<<< HEAD +<<<<<<< HEAD +title: Use application service labels +description: Learn how applications use service labels for publishing +keywords: routing, proxy, interlock, load balancing +--- + +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 title: Application service labels description: Learn how applications use service labels for publishing keywords: routing, proxy @@ -6,6 +16,10 @@ keywords: routing, proxy # Using application service labels +<<<<<<< HEAD +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 Service labels define hostnames that are routed to 
the service, the applicable ports, and other routing configurations. Applications that publish using Interlock use service labels to configure how they are published. diff --git a/ee/ucp/interlock/config/tuning.md b/ee/ucp/interlock/config/tuning.md index ce9b26860c..28818a70a4 100644 --- a/ee/ucp/interlock/config/tuning.md +++ b/ee/ucp/interlock/config/tuning.md @@ -1,4 +1,18 @@ --- +<<<<<<< HEAD +<<<<<<< HEAD +title: Tune the proxy service +description: Learn how to tune the proxy service for environment optimization +keywords: routing, proxy, interlock +--- + +## Constrain the proxy service to multiple dedicated worker nodes +Refer to [Proxy service constraints](../deploy/production.md) for information on how to constrain the proxy service to multiple dedicated worker nodes. + +## Stop +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 title: Proxy service tuning description: Learn how to ????? keywords: routing, proxy @@ -10,6 +24,10 @@ keywords: routing, proxy Refer to [Proxy service constraints](../deploy/production.md) for information on how to constrain the proxy service to multiple dedicated worker nodes. ## Stopping +<<<<<<< HEAD +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 To adjust the stop signal and period, use the `stop-signal` and `stop-grace-period` settings. For example, to set the stop signal to `SIGTERM` and grace period to ten (10) seconds, use the following command: diff --git a/ee/ucp/interlock/config/updates.md b/ee/ucp/interlock/config/updates.md index 5cb79c1c94..3d49e63e19 100644 --- a/ee/ucp/interlock/config/updates.md +++ b/ee/ucp/interlock/config/updates.md @@ -1,4 +1,20 @@ --- +<<<<<<< HEAD +<<<<<<< HEAD +title: Update Interlock services +description: Learn how to update the UCP layer 7 routing solution services +keywords: routing, proxy, interlock +--- + +There are two parts to the update process: + +1. 
Update the Interlock configuration to specify the new extension and/or proxy image versions. +2. Update the Interlock service to use the new configuration and image. + +## Update the Interlock configuration +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 title: Updating Interlock services description: Learn how to update the UCP layer 7 routing solution services keywords: routing, proxy @@ -11,12 +27,85 @@ There are two parts to the update process: 2. Updating the Interlock service to use the new configuration and image. ## Updating the Interlock configuration +<<<<<<< HEAD +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 Create the new configuration: ```bash $> docker config create service.interlock.conf.v2 ``` +<<<<<<< HEAD +<<<<<<< HEAD +## Update the Interlock service +Remove the old configuration and specify the new configuration: + +```bash +$> docker service update --config-rm service.interlock.conf ucp-interlock +$> docker service update --config-add source=service.interlock.conf.v2,target=/config.toml ucp-interlock +``` + +Next, update the Interlock service to use the new image. To pull the latest version of UCP, run the following: + +```bash +$> docker pull docker/ucp:latest +``` + +### Example output + +```bash +latest: Pulling from docker/ucp +cd784148e348: Already exists +3871e7d70c20: Already exists +cad04e4a4815: Pull complete +Digest: sha256:63ca6d3a6c7e94aca60e604b98fccd1295bffd1f69f3d6210031b72fc2467444 +Status: Downloaded newer image for docker/ucp:latest +docker.io/docker/ucp:latest +``` + +Next, list all the latest UCP images. To learn more about `docker/ucp images` and available options, +see [the reference page](/reference/ucp/3.1/cli/images/). 
+ +```bash +$> docker run --rm docker/ucp images --list +``` + +### Example output + +```bash +docker/ucp-agent:{{ page.ucp_version }} +docker/ucp-auth-store:{{ page.ucp_version }} +docker/ucp-auth:{{ page.ucp_version }} +docker/ucp-azure-ip-allocator:{{ page.ucp_version }} +docker/ucp-calico-cni:{{ page.ucp_version }} +docker/ucp-calico-kube-controllers:{{ page.ucp_version }} +docker/ucp-calico-node:{{ page.ucp_version }} +docker/ucp-cfssl:{{ page.ucp_version }} +docker/ucp-compose:{{ page.ucp_version }} +docker/ucp-controller:{{ page.ucp_version }} +docker/ucp-dsinfo:{{ page.ucp_version }} +docker/ucp-etcd:{{ page.ucp_version }} +docker/ucp-hyperkube:{{ page.ucp_version }} +docker/ucp-interlock-extension:{{ page.ucp_version }} +docker/ucp-interlock-proxy:{{ page.ucp_version }} +docker/ucp-interlock:{{ page.ucp_version }} +docker/ucp-kube-compose-api:{{ page.ucp_version }} +docker/ucp-kube-compose:{{ page.ucp_version }} +docker/ucp-kube-dns-dnsmasq-nanny:{{ page.ucp_version }} +docker/ucp-kube-dns-sidecar:{{ page.ucp_version }} +docker/ucp-kube-dns:{{ page.ucp_version }} +docker/ucp-metrics:{{ page.ucp_version }} +docker/ucp-pause:{{ page.ucp_version }} +docker/ucp-swarm:{{ page.ucp_version }} +docker/ucp:{{ page.ucp_version }} +``` + +Interlock starts and checks the config object, which has the new extension version, and +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 ## Updating the Interlock service Remove the old configuration and specify the new configuration: @@ -27,10 +116,24 @@ $> docker service update --config-add source=service.interlock.conf.v2,target=/c Next, update the Interlock service to use the new image. The following example updates the Interlock core service to use the `sha256:d173014908eb09e9a70d8e5ed845469a61f7cbf4032c28fad0ed9af3fc04ef51` version of Interlock. 
Interlock starts and checks the config object, which has the new extension version, and +<<<<<<< HEAD +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 performs a rolling deploy to update all extensions. ```bash $> docker service update \ +<<<<<<< HEAD +<<<<<<< HEAD + --image {{ page.ucp_org }}/ucp-interlock:{{ page.ucp_version }} \ + ucp-interlock +======= --image interlockpreview/interlock@sha256:d173014908eb09e9a70d8e5ed845469a61f7cbf4032c28fad0ed9af3fc04ef51 \ interlock +>>>>>>> Raw content addition +======= + --image interlockpreview/interlock@sha256:d173014908eb09e9a70d8e5ed845469a61f7cbf4032c28fad0ed9af3fc04ef51 \ + interlock +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 ``` diff --git a/ee/ucp/interlock/deploy/index.md b/ee/ucp/interlock/deploy/index.md index 972b40c8fc..8d93b04255 100644 --- a/ee/ucp/interlock/deploy/index.md +++ b/ee/ucp/interlock/deploy/index.md @@ -1,11 +1,26 @@ --- +<<<<<<< HEAD +title: Deploy a layer 7 routing solution +description: Learn the deployment steps for the UCP layer 7 routing solution +keywords: routing, proxy, interlock +======= title: Deploying a layer 7 routing solution for UCP to route traffic to swarm services description: Learn the deployment steps for the UCP layer 7 routing solution keywords: routing, proxy +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 redirect_from: - /ee/ucp/interlock/deploy/configuration-reference/ --- +<<<<<<< HEAD +This topic covers deploying a layer 7 routing solution into a Docker Swarm to route traffic to Swarm services. Layer 7 routing is also referred to as an HTTP routing mesh. + +1. [Prerequisites](#prerequisites) +2. [Enable layer 7 routing](#enable-layer-7-routing) +3. [Work with the core service configuration file](#work-with-the-core-service-configuration-file) +4. [Create a dedicated network for Interlock and extensions](#create-a-dedicated-network-for-Interlock-and-extensions) +5. 
[Create the Interlock service](#create-the-interlock-service) +======= # Deploying basic layer 7 routing and Interlock This topic covers deploying a layer 7 routing solution for UCP into a Docker Swarm. Layer 7 routing is also referred to as HTTP routing mesh. @@ -14,12 +29,18 @@ This topic covers deploying a layer 7 routing solution for UCP into a Docker Swa 3. Working with the core service configuration file 4. Creating a dedicated network for Interlock and extensions 5. Creating the Interlock service +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 ## Prerequisites - [Docker](https://www.docker.com) version 17.06 or later +<<<<<<< HEAD +- Docker must be running in [Swarm mode](/engine/swarm/) +- Internet access (see [Offline installation](./offline-install.md) for installing without internet access) +======= - Docker must be running in [Swarm mode](https://docs.docker.com/engine/swarm/) - Internet access (see [Offline Installation](offline.md) for installing without internet access) +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 ## Enable layer 7 routing By default, layer 7 routing is disabled, so you must first @@ -31,7 +52,7 @@ enable this service from the UCP web UI. ![http routing mesh](../../images/interlock-install-3.png){: .with-border} -By default, the routing mesh service listens on port 80 for HTTP and port +By default, the routing mesh service listens on port 8080 for HTTP and port 8443 for HTTPS. Change the ports if you already have services that are using them. @@ -121,9 +142,16 @@ PollInterval = "3s" LargeClientHeaderBuffers = "4 8k" ClientBodyTimeout = "60s" UnderscoresInHeaders = false +<<<<<<< HEAD + HideInfoHeaders = false +``` + +### Work with the core service configuration file +======= ``` ### Working with the core service configuration file +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 Interlock uses the TOML file for the core service configuration. 
The following example utilizes Swarm deployment and recovery features by creating a Docker Config object: ```bash @@ -134,9 +162,15 @@ PollInterval = "3s" [Extensions] [Extensions.default] +<<<<<<< HEAD + Image = "{{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version }}" + Args = ["-D"] + ProxyImage = "{{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }}" +======= Image = "interlockpreview/interlock-extension-nginx:2.0.0-preview" Args = ["-D"] ProxyImage = "nginx:alpine" +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 ProxyArgs = [] ProxyConfigPath = "/etc/nginx/nginx.conf" ProxyReplicas = 1 @@ -157,7 +191,11 @@ EOF oqkvv1asncf6p2axhx41vylgt ``` +<<<<<<< HEAD +### Create a dedicated network for Interlock and extensions +======= ### Creating a dedicated network for Interlock and extensions +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 Next, create a dedicated network for Interlock and the extensions: @@ -165,10 +203,17 @@ Next, create a dedicated network for Interlock and the extensions: $> docker network create -d overlay interlock ``` +<<<<<<< HEAD +### Create the Interlock service +Now you can create the Interlock service. Note the requirement to constrain to a manager. The +Interlock core service must have access to a Swarm manager, however the extension and proxy services +are recommended to run on workers. See the [Production](./production.md) section for more information +======= ### Creating the Interlock service Now you can create the Interlock service. Note the requirement to constrain to a manager. The Interlock core service must have access to a Swarm manager, however the extension and proxy services are recommended to run on workers. See the [Production](production.md) section for more information +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 on setting up for an production environment. 
```bash @@ -178,7 +223,11 @@ $> docker service create \ --network interlock \ --constraint node.role==manager \ --config src=service.interlock.conf,target=/config.toml \ +<<<<<<< HEAD + {{ page.ucp_org }}/ucp-interlock:{{ page.ucp_version }} -D run -c /config.toml +======= interlockpreview/interlock:2.0.0-preview -D run -c /config.toml +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 sjpgq7h621exno6svdnsvpv9z ``` @@ -189,15 +238,27 @@ one for the extension service, and one for the proxy service: $> docker service ls ID NAME MODE REPLICAS IMAGE PORTS lheajcskcbby modest_raman replicated 1/1 nginx:alpine *:80->80/tcp *:443->443/tcp +<<<<<<< HEAD +oxjvqc6gxf91 keen_clarke replicated 1/1 {{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version }} +sjpgq7h621ex interlock replicated 1/1 {{ page.ucp_org }}/ucp-interlock:{{ page.ucp_version }} +======= oxjvqc6gxf91 keen_clarke replicated 1/1 interlockpreview/interlock-extension-nginx:2.0.0-preview sjpgq7h621ex interlock replicated 1/1 interlockpreview/interlock:2.0.0-preview +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 ``` The Interlock traffic layer is now deployed. 
## Next steps +<<<<<<< HEAD +- [Configure Interlock](../config/index.md) +- [Deploy applications](../usage/index.md) +- [Production deployment information](./production.md) +- [Offline installation](./offline-install.md) +======= - [Configuring Interlock](../config/index.md) - [Deploying applications](../usage/index.md) - [Production deployment information](./production.md) - [Offline installation information](./offline.md) +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 diff --git a/ee/ucp/interlock/deploy/offline-install.md b/ee/ucp/interlock/deploy/offline-install.md index 65c7574309..52e1bcb82e 100644 --- a/ee/ucp/interlock/deploy/offline-install.md +++ b/ee/ucp/interlock/deploy/offline-install.md @@ -1,16 +1,33 @@ --- title: Offline installation considerations +<<<<<<< HEAD +description: Learn how to install Interlock on a Docker cluster without internet access. +keywords: routing, proxy, interlock +--- + +======= description: Learn how to install Interlock on a Docker cluster without internet access. keywords: routing, proxy --- # Offline installation +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 To install Interlock on a Docker cluster without internet access, the Docker images must be loaded. This topic describes how to export the images from a local Docker engine and then load them to the Docker Swarm cluster. First, using an existing Docker engine, save the images: ```bash +<<<<<<< HEAD +$> docker save {{ page.ucp_org }}/ucp-interlock:{{ page.ucp_version }} > interlock.tar +$> docker save {{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version }} > interlock-extension-nginx.tar +$> docker save {{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }} > nginx.tar +``` + +Note: replace `{{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version +}}` and `{{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }}` with the +corresponding extension and proxy image if you are not using Nginx. 
+======= $> docker save docker/interlock:latest > interlock.tar $> docker save docker/interlock-extension-nginx:latest > interlock-extension-nginx.tar $> docker save nginx:alpine > nginx.tar @@ -18,6 +35,7 @@ $> docker save nginx:alpine > nginx.tar Note: replace `docker/interlock-extension-nginx:latest` and `nginx:alpine` with the corresponding extension and proxy image if you are not using Nginx. +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 You should have the following two files: @@ -34,5 +52,9 @@ $> docker load < nginx:alpine.tar ``` ## Next steps +<<<<<<< HEAD +After running on each node, refer to the [Deploy](./index.md) section to +======= After running on each node, you can continue to the [Deployment](index.md) section to +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 continue the installation. diff --git a/ee/ucp/interlock/deploy/production.md b/ee/ucp/interlock/deploy/production.md index d2a1736aee..c7a6140f98 100644 --- a/ee/ucp/interlock/deploy/production.md +++ b/ee/ucp/interlock/deploy/production.md @@ -2,14 +2,21 @@ title: Configure layer 7 routing for production description: Learn how to configure the layer 7 routing solution for a production environment. -keywords: routing, proxy +keywords: routing, proxy, interlock --- +<<<<<<< HEAD +This section includes documentation on configuring Interlock +for a production environment. If you have not yet deployed Interlock, refer to [Deploying Interlock](./index.md) because this information builds upon the basic deployment. This topic does not cover infrastructure deployment - +it assumes you have a vanilla Swarm cluster (`docker swarm init` and `docker swarm join` from the nodes). +Refer to the [Swarm](/engine/swarm/) documentation if you need help +======= # Deploying to production This section includes documentation on configuring Interlock for a production environment. 
If you have not yet deployed Interlock, refer to [Deploying Interlock](index.md) because this information builds upon the basic deployment. This topic does not cover infrastructure deployment - it assumes you have a vanilla Swarm cluster (`docker swarm init` and `docker swarm join` from the nodes). Refer to the [Swarm](https://docs.docker.com/engine/swarm/) documentation if you need help +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 getting a Swarm cluster deployed. The layer 7 solution that ships with UCP is highly available @@ -126,5 +133,10 @@ to provide more bandwidth for the user services. ![Interlock 2.0 Production Deployment](../../images/interlock_production_deploy.png) ## Next steps +<<<<<<< HEAD +- [Configure Interlock](../config/index.md) +- [Deploy applications](../usage/index.md) +======= - [Configuring Interlock](../config/index.md) - [Deploying applications](../usage/index.md) +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 diff --git a/ee/ucp/interlock/deploy/upgrade.md~HEAD b/ee/ucp/interlock/deploy/upgrade.md~HEAD new file mode 100644 index 0000000000..19f2c514d6 --- /dev/null +++ b/ee/ucp/interlock/deploy/upgrade.md~HEAD @@ -0,0 +1,129 @@ +--- +title: Layer 7 routing upgrade +description: Learn how to upgrade your existing layer 7 routing solution +keywords: routing, proxy, hrm +redirect_from: + - /ee/ucp/interlock/upgrade/ +--- + +The [HTTP routing mesh](/datacenter/ucp/2.2/guides/admin/configure/use-domain-names-to-access-services.md) +functionality was redesigned in UCP 3.0 for greater security and flexibility. +The functionality was also renamed to "layer 7 routing", to make it easier for +new users to get started. + +[Learn about the new layer 7 routing functionality](../index.md). + +To route traffic to your service you apply specific labels to your swarm +services, describing the hostname for the service and other configurations. 
+Things work in the same way as they did with the HTTP routing mesh, with the +only difference being that you use different labels. + +You don't have to manually update your services. During the upgrade process to +3.0, UCP updates the services to start using new labels. + +This article describes the upgrade process for the routing component, so that +you can troubleshoot UCP and your services, in case something goes wrong with +the upgrade. + +If you are using the HTTP routing mesh, and start an upgrade to UCP 3.0: + +1. UCP starts a reconciliation process to ensure all internal components are +deployed. As part of this, services using HRM labels are inspected. +2. UCP creates the `com.docker.ucp.interlock.conf-` based on HRM configurations. +3. The HRM service is removed. +4. The `ucp-interlock` service is deployed with the configuration created. +5. The `ucp-interlock` service deploys the `ucp-interlock-extension` and +`ucp-interlock-proxy-services`. + +The only way to rollback from an upgrade is by restoring from a backup taken +before the upgrade. If something goes wrong during the upgrade process, you +need to troubleshoot the interlock services and your services, since the HRM +service won't be running after the upgrade. + +[Learn more about the interlock services and architecture](../architecture.md). + +## Check that routing works + +After upgrading to UCP 3.0, you should check if all swarm services are still +routable. + +For services using HTTP: + +```bash +curl -vs http://:/ -H "Host: " +``` + +For services using HTTPS: + +```bash +curl -vs https://: +``` + +After the upgrade, check that you can still use the same hostnames to access +the swarm services. + +## The ucp-interlock services are not running + +After the upgrade to UCP 3.0, the following services should be running: + +* `ucp-interlock`: monitors swarm workloads configured to use layer 7 routing. 
+* `ucp-interlock-extension`: Helper service that generates the configuration for +the `ucp-interlock-proxy` service. +* `ucp-interlock-proxy`: A service that provides load balancing and proxying for +swarm workloads. + +To check if these services are running, use a client bundle with administrator +permissions and run: + +```bash +docker ps --filter "name=ucp-interlock" +``` + +* If the `ucp-interlock` service doesn't exist or is not running, something went +wrong with the reconciliation step. +* If this still doesn't work, it's possible that UCP is having problems creating +the `com.docker.ucp.interlock.conf-1`, due to name conflicts. Make sure you +don't have any configuration with the same name by running: + ``` + docker config ls --filter "name=com.docker.ucp.interlock" + ``` +* If either the `ucp-interlock-extension` or `ucp-interlock-proxy` services are +not running, it's possible that there are port conflicts. +As a workaround re-enable the layer 7 routing configuration from the +[UCP settings page](deploy/index.md). Make sure the ports you choose are not +being used by other services. + +## Workarounds and clean-up + +If you have any of the problems above, disable and enable the layer 7 routing +setting on the [UCP settings page](index.md). This redeploys the +services with their default configuration. + +When doing that make sure you specify the same ports you were using for HRM, +and that no other services are listening on those ports. + +You should also check if the `ucp-hrm` service is running. If it is, you should +stop it since it can conflict with the `ucp-interlock-proxy` service. + +## Optionally remove labels + +As part of the upgrade process UCP adds the +[labels specific to the new layer 7 routing solution](../usage/labels-reference.md). + +You can update your services to remove the old HRM labels, since they won't be +used anymore. 
+ +## Optionally segregate control traffic + +Interlock is designed so that all the control traffic is kept separate from +the application traffic. + +If before upgrading you had all your applications attached to the `ucp-hrm` +network, after upgrading you can update your services to start using a +dedicated network for routing that's not shared with other services. +[Learn how to use a dedicated network](../usage/index.md). + +If before upgrading you had a dedicated network to route traffic to each service, +Interlock will continue using those dedicated networks. However, the +`ucp-interlock` will be attached to each of those networks. You can update +the `ucp-interlock` service so that it is only connected to the `ucp-hrm` network. diff --git a/ee/ucp/interlock/deploy/upgrade.md~Raw content addition b/ee/ucp/interlock/deploy/upgrade.md~Raw content addition new file mode 100644 index 0000000000..5e38c088fe --- /dev/null +++ b/ee/ucp/interlock/deploy/upgrade.md~Raw content addition @@ -0,0 +1,130 @@ +--- +title: Layer 7 routing upgrade +description: Learn how to upgrade your existing layer 7 routing solution +keywords: routing, proxy, hrm +redirect_from: + - /ee/ucp/interlock/upgrade/ +--- +# UCP upgrade process + +The [HTTP routing mesh](/datacenter/ucp/2.2/guides/admin/configure/use-domain-names-to-access-services.md) +functionality was redesigned in UCP 3.0 for greater security and flexibility. +The functionality was also renamed to "layer 7 routing", to make it easier for +new users to get started. + +[Learn about the new layer 7 routing functionality](../index.md). + +To route traffic to your service you apply specific labels to your swarm +services, describing the hostname for the service and other configurations. +Things work in the same way as they did with the HTTP routing mesh, with the +only difference being that you use different labels. + +You don't have to manually update your services. 
During the upgrade process to +3.0, UCP updates the services to start using new labels. + +This article describes the upgrade process for the routing component, so that +you can troubleshoot UCP and your services, in case something goes wrong with +the upgrade. + +If you are using the HTTP routing mesh, and start an upgrade to UCP 3.0: + +1. UCP starts a reconciliation process to ensure all internal components are +deployed. As part of this, services using HRM labels are inspected. +2. UCP creates the `com.docker.ucp.interlock.conf-` based on HRM configurations. +3. The HRM service is removed. +4. The `ucp-interlock` service is deployed with the configuration created. +5. The `ucp-interlock` service deploys the `ucp-interlock-extension` and +`ucp-interlock-proxy-services`. + +The only way to rollback from an upgrade is by restoring from a backup taken +before the upgrade. If something goes wrong during the upgrade process, you +need to troubleshoot the interlock services and your services, since the HRM +service won't be running after the upgrade. + +[Learn more about the interlock services and architecture](../architecture.md). + +## Check that routing works + +After upgrading to UCP 3.0, you should check if all swarm services are still +routable. + +For services using HTTP: + +```bash +curl -vs http://:/ -H "Host: " +``` + +For services using HTTPS: + +```bash +curl -vs https://: +``` + +After the upgrade, check that you can still use the same hostnames to access +the swarm services. + +## The ucp-interlock services are not running + +After the upgrade to UCP 3.0, the following services should be running: + +* `ucp-interlock`: monitors swarm workloads configured to use layer 7 routing. +* `ucp-interlock-extension`: Helper service that generates the configuration for +the `ucp-interlock-proxy` service. +* `ucp-interlock-proxy`: A service that provides load balancing and proxying for +swarm workloads. 
+ +To check if these services are running, use a client bundle with administrator +permissions and run: + +```bash +docker ps --filter "name=ucp-interlock" +``` + +* If the `ucp-interlock` service doesn't exist or is not running, something went +wrong with the reconciliation step. +* If this still doesn't work, it's possible that UCP is having problems creating +the `com.docker.ucp.interlock.conf-1`, due to name conflicts. Make sure you +don't have any configuration with the same name by running: + ``` + docker config ls --filter "name=com.docker.ucp.interlock" + ``` +* If either the `ucp-interlock-extension` or `ucp-interlock-proxy` services are +not running, it's possible that there are port conflicts. +As a workaround re-enable the layer 7 routing configuration from the +[UCP settings page](deploy/index.md). Make sure the ports you choose are not +being used by other services. + +## Workarounds and clean-up + +If you have any of the problems above, disable and enable the layer 7 routing +setting on the [UCP settings page](index.md). This redeploys the +services with their default configuration. + +When doing that make sure you specify the same ports you were using for HRM, +and that no other services are listening on those ports. + +You should also check if the `ucp-hrm` service is running. If it is, you should +stop it since it can conflict with the `ucp-interlock-proxy` service. + +## Optionally remove labels + +As part of the upgrade process UCP adds the +[labels specific to the new layer 7 routing solution](../usage/labels-reference.md). + +You can update your services to remove the old HRM labels, since they won't be +used anymore. + +## Optionally segregate control traffic + +Interlock is designed so that all the control traffic is kept separate from +the application traffic. 
+ +If before upgrading you had all your applications attached to the `ucp-hrm` +network, after upgrading you can update your services to start using a +dedicated network for routing that's not shared with other services. +[Learn how to use a dedicated network](../usage/index.md). + +If before upgrading you had a dedicated network to route traffic to each service, +Interlock will continue using those dedicated networks. However, the +`ucp-interlock` will be attached to each of those networks. You can update +the `ucp-interlock` service so that it is only connected to the `ucp-hrm` network. diff --git a/ee/ucp/interlock/index.md b/ee/ucp/interlock/index.md index d1ea689248..a5aad8a2a1 100644 --- a/ee/ucp/interlock/index.md +++ b/ee/ucp/interlock/index.md @@ -1,11 +1,24 @@ --- title: Layer 7 routing overview description: Learn how to route layer 7 traffic to your Swarm services +<<<<<<< HEAD +<<<<<<< HEAD +keywords: routing, UCP, interlock, load balancing +--- + +Application-layer (Layer 7) routing is the application routing and load balancing (ingress routing) system included with Docker Enterprise for Swarm orchestration. Interlock architecture takes advantage of the underlying Swarm components to provide scalable Layer 7 routing and Layer 4 VIP mode functionality. +======= +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 keywords: routing, proxy --- ## Introduction Interlock is the application routing and load balancing (ingress routing) system included with Docker Enterprise for Swarm orchestration. Interlock takes advantage of the underlying Swarm components to provide scalable Layer 7 routing and Layer 4 VIP mode functionality. +<<<<<<< HEAD +>>>>>>> Raw content addition +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 Interlock is specific to the Swarm orchestrator. 
If you're trying to route traffic to your Kubernetes applications, check diff --git a/ee/ucp/interlock/usage/canary.md b/ee/ucp/interlock/usage/canary.md index 3462e6715a..ce5dabe210 100644 --- a/ee/ucp/interlock/usage/canary.md +++ b/ee/ucp/interlock/usage/canary.md @@ -1,10 +1,13 @@ --- -title: Canary application instances +title: Publish Canary application instances description: Learn how to do canary deployments for your Docker swarm services keywords: routing, proxy --- +<<<<<<< HEAD +======= # Publishing a service as a canary instance +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 The following example publishes a service as a canary instance. First, create an overlay network to isolate and secure service traffic: @@ -28,7 +31,11 @@ $> docker service create \ ehazlett/docker-demo ``` +<<<<<<< HEAD +Interlock detects when the service is available and publishes it. After tasks are running +======= Interlock detects when the service is available and publishes it. After tasks are running +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 and the proxy service is updated, the application is available via `http://demo.local`: ```bash @@ -59,7 +66,11 @@ $> curl -vs -H "Host: demo.local" http://127.0.0.1/ping Notice `metadata` is specified with `demo-version-1`. 
+<<<<<<< HEAD +## Deploy an updated service as a canary instance +======= # Deploying an updated service as a canary instance +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 The following example deploys an updated service as a canary instance: ```bash diff --git a/ee/ucp/interlock/usage/context.md b/ee/ucp/interlock/usage/context.md index 2c93a0e081..47f81be2ae 100644 --- a/ee/ucp/interlock/usage/context.md +++ b/ee/ucp/interlock/usage/context.md @@ -1,11 +1,18 @@ --- +<<<<<<< HEAD +title: Use context and path-based routing +======= title: Context and path-based routing +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 description: Learn how to route traffic to your Docker swarm services based on a url path. keywords: routing, proxy --- +<<<<<<< HEAD +======= # Using context or path-based routing +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 The following example publishes a service using context or path based routing. First, create an overlay network so that service traffic is isolated and secure: @@ -63,4 +70,3 @@ $> curl -vs -H "Host: demo.local" http://127.0.0.1/app/ < x-upstream-response-time: 1510928717.306 ... ``` - diff --git a/ee/ucp/interlock/usage/index.md b/ee/ucp/interlock/usage/index.md index 18c980613e..76999ac73e 100644 --- a/ee/ucp/interlock/usage/index.md +++ b/ee/ucp/interlock/usage/index.md @@ -7,6 +7,17 @@ redirect_from: - /ee/ucp/interlock/deploy/configure/ --- +<<<<<<< HEAD +After Interlock is deployed, you can launch and publish services and applications. +Use [Service Labels](/engine/reference/commandline/service_create/#set-metadata-on-a-service--l-label) +to configure services to publish themselves to the load balancer. + +The following examples assume a DNS entry (or local hosts entry if you are testing locally) exists +for each of the applications. 
+ +## Publish a service with four replicas +Create a Docker Service using two labels: +======= ## Deploying services and applications After Interlock is deployed, you can launch and publish services and applications. Use [Service Labels](https://docs.docker.com/engine/reference/commandline/service_create/#set-metadata-on-a-service--l-label) @@ -17,6 +28,7 @@ for each of the applications. ## Publishing a service with four replicas To publish, create a Docker Service using two labels: +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 - `com.docker.lb.hosts` - `com.docker.lb.port` @@ -25,7 +37,11 @@ The `com.docker.lb.hosts` label instructs Interlock where the service should be The `com.docker.lb.port` label instructs what port the proxy service should use to access the upstreams. +<<<<<<< HEAD +Publish a demo service to the host `demo.local`: +======= This example publishes a demo service to the host `demo.local`. +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 First, create an overlay network so that service traffic is isolated and secure: @@ -64,10 +80,15 @@ demo scaled to 4 In this example, four service replicas are configured as upstreams. The load balancer balances traffic across all service replicas. +<<<<<<< HEAD +## Publish a service with a web interface +This example deploys a simple service that: +======= -------------------------------ARE BOTH EXAMPLES NEEDED?----------------------------------------------- ## Publishing a service with a web interface The next example deploys a simple service that: +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 * Has a JSON endpoint that returns the ID of the task serving the request. * Has a web interface that shows how many tasks the service is running. @@ -151,6 +172,21 @@ able to start using the service from your browser. 
![browser](../../images/route-simple-app-1.png){: .with-border } +<<<<<<< HEAD +## Next steps + +- [Publish a service as a canary instance](./canary.md) +- [Use context or path-based routing](./context.md) +- [Publish a default host service](./interlock-vip-mode.md) +- [Specify a routing mode](./interlock-vip-mode.md) +- [Use routing labels](./labels-reference.md) +- [Implement redirects](./redirects.md) +- [Implement a service cluster](./service-clusters.md) +- [Implement persistent (sticky) sessions](./sessions.md) +- [Implement SSL](./ssl.md) +- [Secure services with TLS](./tls.md) +- [Configure websockets](./websockets.md) +======= ## Next steps @@ -165,3 +201,4 @@ able to start using the service from your browser. - [Implementing SSL](ssl.md) - [Securing services with TLS](tls.md) - [Configuring websockets](websockets.md) +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 diff --git a/ee/ucp/interlock/usage/interlock-vip-mode.md b/ee/ucp/interlock/usage/interlock-vip-mode.md index 6349a67512..bdf6814d1d 100644 --- a/ee/ucp/interlock/usage/interlock-vip-mode.md +++ b/ee/ucp/interlock/usage/interlock-vip-mode.md @@ -1,4 +1,16 @@ --- +<<<<<<< HEAD +title: Specify a routing mode +description: Learn about task and VIP backend routing modes for Layer 7 routing +keywords: routing, proxy, interlock +redirect_from: + - /ee/ucp/interlock/usage/default-backend/ +--- + +You can publish services using "vip" and "task" backend routing modes. + +## Task routing mode +======= title: Routing modes description: Learn about task and VIP backend routing modes for Layer 7 routing keywords: routing, proxy @@ -8,6 +20,7 @@ keywords: routing, proxy You can publish services using "vip" and "task" backend routing modes. ### Task Routing Mode +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 Task routing is the default Interlock behavior and the default backend mode if one is not specified. 
In task routing mode, Interlock uses backend task IPs to route traffic from the proxy to each container. @@ -17,7 +30,11 @@ Task routing mode applies L7 routing and then sends packets directly to a contai ![task mode](../../images/interlock-task-mode.png) +<<<<<<< HEAD +## VIP routing mode +======= ### VIP Routing Mode +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 VIP mode is an alternative mode of routing in which Interlock uses the Swarm service VIP as the backend IP instead of container IPs. Traffic to the frontend route is L7 load balanced to the Swarm service VIP, which L4 load balances to backend tasks. @@ -30,7 +47,6 @@ In VIP routing mode Interlock uses the service VIP (a persistent endpoint that e VIP routing mode was introduced in Universal Control Plane (UCP) 3.0 version 3.0.3 and 3.1 version 3.1.2. VIP routing mode applies L7 routing and then sends packets to the Swarm L4 load balancer which routes traffic service containers. - ![vip mode](../../images/interlock-vip-mode.png) While VIP mode provides endpoint stability in the face of application churn, it cannot support sticky sessions because sticky sessions depend on routing directly to container IPs. @@ -65,7 +81,11 @@ The following two updates still require a proxy reconfiguration (because these a - Add/Remove a network on a service - Deployment/Deletion of a service +<<<<<<< HEAD +#### Publish a default host service +======= #### Publishing a default host service +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 The following example publishes a service to be a default host. The service responds whenever there is a request to a host that is not configured. 
diff --git a/ee/ucp/interlock/usage/labels-reference.md b/ee/ucp/interlock/usage/labels-reference.md index b6e2a2bf7a..e5dbafb2e1 100644 --- a/ee/ucp/interlock/usage/labels-reference.md +++ b/ee/ucp/interlock/usage/labels-reference.md @@ -1,13 +1,17 @@ --- -title: Layer 7 routing labels reference +title: Use layer 7 routing labels description: Learn about the labels you can use in your swarm services to route - layer 7 traffic to them. + layer 7 traffic. keywords: routing, proxy --- +<<<<<<< HEAD +After you enable the layer 7 routing solution, you can +======= ## Using routing labels Once the layer 7 routing solution is enabled, you can +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 [start using it in your swarm services](index.md). diff --git a/ee/ucp/interlock/usage/redirects.md b/ee/ucp/interlock/usage/redirects.md index 7fd9d6474b..9fa2a7fa4c 100644 --- a/ee/ucp/interlock/usage/redirects.md +++ b/ee/ucp/interlock/usage/redirects.md @@ -1,10 +1,15 @@ --- -title: Application redirects +title: Implement application redirects description: Learn how to implement redirects using swarm services and the layer 7 routing solution for UCP. -keywords: routing, proxy, redirects +keywords: routing, proxy, redirects, interlock --- +<<<<<<< HEAD +The following example publishes a service and configures a redirect from `old.local` to `new.local`. + +First, create an overlay network so that service traffic is isolated and secure: +======= # Implementing redirects The following example deploys a simple service that can be reached at `app.example.org`. Requests to `old.example.org` are redirected to that service. @@ -39,28 +44,71 @@ should be redirected to `app.example.org`. 
Set up your CLI client with a [UCP client bundle](../../user-access/cli.md), and deploy the service: +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 ```bash -docker stack deploy --compose-file docker-compose.yml demo +$> docker network create -d overlay demo +1se1glh749q1i4pw0kf26mfx5 ``` +<<<<<<< HEAD +Next, create the service with the redirect: +======= You can also use the CLI to test if the redirect is working, by running the following command: +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 ```bash -curl --head --header "Host: old.example.org" http://: +$> docker service create \ + --name demo \ + --network demo \ + --detach=false \ + --label com.docker.lb.hosts=old.local,new.local \ + --label com.docker.lb.port=8080 \ + --label com.docker.lb.redirects=http://old.local,http://new.local \ + --env METADATA="demo-new" \ + ehazlett/docker-demo ``` +<<<<<<< HEAD +Interlock detects when the service is available and publishes it. After tasks are running +and the proxy service is updated, the application is available via `http://new.local` +with a redirect configured that sends `http://old.local` to `http://new.local`: +======= You should see something like the following output: +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 -```none -HTTP/1.1 302 Moved Temporarily -Server: nginx/1.13.8 -Date: Thu, 29 Mar 2018 23:16:46 GMT -Content-Type: text/html -Content-Length: 161 -Connection: keep-alive -Location: http://app.example.org/ +```bash +$> curl -vs -H "Host: old.local" http://127.0.0.1 +* Rebuilt URL to: http://127.0.0.1/ +* Trying 127.0.0.1... 
+* TCP_NODELAY set +* Connected to 127.0.0.1 (127.0.0.1) port 80 (#0) +> GET / HTTP/1.1 +> Host: old.local +> User-Agent: curl/7.54.0 +> Accept: */* +> +< HTTP/1.1 302 Moved Temporarily +< Server: nginx/1.13.6 +< Date: Wed, 08 Nov 2017 19:06:27 GMT +< Content-Type: text/html +< Content-Length: 161 +< Connection: keep-alive +< Location: http://new.local/ +< x-request-id: c4128318413b589cafb6d9ff8b2aef17 +< x-proxy-id: 48854cd435a4 +< x-server-info: interlock/2.0.0-development (147ff2b1) linux/amd64 +< + +302 Found + +

    302 Found

    +
    nginx/1.13.6
    + + ``` +<<<<<<< HEAD +======= You can also test that the redirect works from your browser. For that, make sure you add entries for both `app.example.org` and `old.example.org` to your `/etc/hosts` file and map them to the IP address @@ -127,3 +175,4 @@ $> curl -vs -H "Host: old.local" http://127.0.0.1 ``` +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 diff --git a/ee/ucp/interlock/usage/service-clusters.md b/ee/ucp/interlock/usage/service-clusters.md index 18ea84e097..c37007d68a 100644 --- a/ee/ucp/interlock/usage/service-clusters.md +++ b/ee/ucp/interlock/usage/service-clusters.md @@ -1,4 +1,90 @@ --- +<<<<<<< HEAD +title: Implement service clusters +description: Learn how to route traffic to different proxies using a service cluster. +keywords: ucp, interlock, load balancing, routing +--- + +## Configure Proxy Services +With the node labels, you can re-configure the Interlock Proxy services to be constrained to the +workers for each region. For example, from a manager, run the following commands to pin the proxy services to the ingress workers: + +```bash +$> docker service update \ + --constraint-add node.labels.nodetype==loadbalancer \ + --constraint-add node.labels.region==us-east \ + ucp-interlock-proxy-us-east +$> docker service update \ + --constraint-add node.labels.nodetype==loadbalancer \ + --constraint-add node.labels.region==us-west \ + ucp-interlock-proxy-us-west +``` + +You are now ready to deploy applications. 
First, create individual networks for each application: + +```bash +$> docker network create -d overlay demo-east +$> docker network create -d overlay demo-west +``` + +Next, deploy the application in the `us-east` service cluster: + +```bash +$> docker service create \ + --name demo-east \ + --network demo-east \ + --detach=true \ + --label com.docker.lb.hosts=demo-east.local \ + --label com.docker.lb.port=8080 \ + --label com.docker.lb.service_cluster=us-east \ + --env METADATA="us-east" \ + ehazlett/docker-demo +``` + +Now deploy the application in the `us-west` service cluster: + +```bash +$> docker service create \ + --name demo-west \ + --network demo-west \ + --detach=true \ + --label com.docker.lb.hosts=demo-west.local \ + --label com.docker.lb.port=8080 \ + --label com.docker.lb.service_cluster=us-west \ + --env METADATA="us-west" \ + ehazlett/docker-demo +``` + +Only the designated service cluster is configured for the applications. For example, the `us-east` service cluster +is not configured to serve traffic for the `us-west` service cluster and vice versa. You can observe this when you +send requests to each service cluster. + +When you send a request to the `us-east` service cluster, it only knows about the `us-east` application. This example uses IP address lookup from the swarm API, so you must `ssh` to a manager node or configure your shell with a UCP client bundle before testing: + +```bash +{% raw %} +$> curl -H "Host: demo-east.local" http://$(docker node inspect -f '{{ .Status.Addr }}' lb-00):8080/ping +{"instance":"1b2d71619592","version":"0.1","metadata":"us-east","request_id":"3d57404cf90112eee861f9d7955d044b"} +$> curl -H "Host: demo-west.local" http://$(docker node inspect -f '{{ .Status.Addr }}' lb-00):8080/ping + +404 Not Found + +

    404 Not Found

    +
    nginx/1.13.6
    + + +{% endraw %} +``` + +Application traffic is isolated to each service cluster. Interlock also ensures that a proxy is updated only if it has corresponding updates to its designated service cluster. In this example, updates to the `us-east` cluster do not affect the `us-west` cluster. If there is a problem, the others are not affected. + +## Usage + +The following example configures an eight (8) node Swarm cluster that uses service clusters +to route traffic to different proxies. This example includes: + +- Three (3) managers and five (5) workers +======= title: Service clusters description: Learn how to route traffic to different proxies using a service cluster. keywords: ucp, interlock, load balancing @@ -9,6 +95,7 @@ The following example configures an eight (8) node Swarm cluster that uses servi to route traffic to different proxies. This example includes: - three (3) managers and five (5) workers +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 - Four workers that are configured with node labels to be dedicated ingress cluster load balancer nodes. These nodes receive all application traffic. @@ -87,7 +174,7 @@ PollInterval = "3s" Image = "{{ page.ucp_org }}/ucp-interlock-extension:{{ page.ucp_version }}" Args = [] ServiceName = "ucp-interlock-extension-us-west" - ProxyImage = "docker/ucp-interlock-proxy:3.1.2" + ProxyImage = "{{ page.ucp_org }}/ucp-interlock-proxy:{{ page.ucp_version }}" ProxyArgs = [] ProxyServiceName = "ucp-interlock-proxy-us-west" ProxyConfigPath = "/etc/nginx/nginx.conf" @@ -122,6 +209,8 @@ $> docker network create -d overlay ucp-interlock ``` Now [enable the Interlock service](../deploy/index.md#enable-layer-7-routing). +<<<<<<< HEAD +======= -------REMOVE THE FOLLOWING PER --------- https://github.com/docker/docker.github.io/issues/8415----- @@ -213,3 +302,4 @@ $> curl -H "Host: demo-west.local" http://$(docker node inspect -f '{{ .Status.A ``` Application traffic is isolated to each service cluster. 
Interlock also ensures that a proxy is updated only if it has corresponding updates to its designated service cluster. In this example, updates to the `us-east` cluster do not affect the `us-west` cluster. If there is a problem, the others are not affected. +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 diff --git a/ee/ucp/interlock/usage/sessions.md b/ee/ucp/interlock/usage/sessions.md index 96e6f6a8f5..c80feb04ab 100644 --- a/ee/ucp/interlock/usage/sessions.md +++ b/ee/ucp/interlock/usage/sessions.md @@ -1,15 +1,22 @@ --- -title: Persistent (sticky) sessions +title: Implement persistent (sticky) sessions description: Learn how to configure your swarm services with persistent sessions using UCP. -keywords: routing, proxy +keywords: routing, proxy, cookies, IP hash --- +<<<<<<< HEAD +You can publish a service and configure the proxy for persistent (sticky) sessions using: + +- Cookies +- IP hashing +======= # Implementing persistent (sticky) sessions You can publish a service and configure the proxy for persistent (sticky) sessions using: - Cookies - IP Hashing +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 ## Cookies To configure sticky sessions using cookies: @@ -132,4 +139,7 @@ to a specific backend. > **Note**: due to the way the IP hashing works for extensions, you will notice a new upstream address when scaling replicas. This is > expected, because internally the proxy uses the new set of replicas to determine a backend on which to pin. When the upstreams are > determined, a new "sticky" backend is chosen as the dedicated upstream. +<<<<<<< HEAD +======= +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 diff --git a/ee/ucp/interlock/usage/tls.md b/ee/ucp/interlock/usage/tls.md index ed2c3f6b81..0ed38d3afd 100644 --- a/ee/ucp/interlock/usage/tls.md +++ b/ee/ucp/interlock/usage/tls.md @@ -1,18 +1,26 @@ --- +<<<<<<< HEAD +title: Secure services with TLS +description: Learn how to configure your swarm services with TLS. 
+======= title: Securing services with TLS description: Learn how to configure your swarm services with TLS using the layer 7 routing solution for UCP. +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 keywords: routing, proxy, tls --- +<<<<<<< HEAD +======= # Securing services with TLS +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 After [deploying a layer 7 routing solution](../deploy/index.md), you have two options for securing your services with TLS: -* Let the proxy terminate the TLS connection. All traffic between end-users and +* [Let the proxy terminate the TLS connection.](#let-the-proxy-handle-tls) All traffic between end-users and the proxy is encrypted, but the traffic going between the proxy and your swarm service is not secured. -* Let your swarm service terminate the TLS connection. The end-to-end traffic +* [Let your swarm service terminate the TLS connection.](#let-your-service-handle-tls) The end-to-end traffic is encrypted and the proxy service allows TLS traffic to passthrough unchanged. Regardless of the option selected to secure swarm services, there are two steps required to @@ -22,7 +30,6 @@ route traffic with TLS: place the private key and certificate used for TLS. 2. Add labels to your swarm service for UCP to reconfigure the proxy service. - ## Let the proxy handle TLS The following example deploys a swarm service and lets the proxy service handle the TLS connection. All traffic between the proxy and the swarm service is diff --git a/ee/ucp/interlock/usage/websockets.md b/ee/ucp/interlock/usage/websockets.md index 7bc839654c..e233c20a3a 100644 --- a/ee/ucp/interlock/usage/websockets.md +++ b/ee/ucp/interlock/usage/websockets.md @@ -1,4 +1,11 @@ --- +<<<<<<< HEAD +title: Use websockets +description: Learn how to use websockets in your swarm services. +keywords: routing, proxy, websockets +--- + +======= title: Websockets description: Learn how to use websockets in your swarm services when using the layer 7 routing solution for UCP. 
@@ -7,6 +14,7 @@ keywords: routing, proxy # Using websockets +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 First, create an overlay network to isolate and secure service traffic: ```bash @@ -29,8 +37,14 @@ $> docker service create \ > **Note**: for this to work, you must have an entry for `demo.local` in your local hosts (i.e. `/etc/hosts`) file. > This uses the browser for websocket communication, so you must have an entry or use a routable domain. +<<<<<<< HEAD +======= Interlock detects when the service is available and publishes it. Once tasks are running and the proxy service is updated, the application should be available via `http://demo.local`. Open two instances of your browser and text should be displayed on both instances as you type. +>>>>>>> df4abbfc665cd5b9e518a8f6d91bd686f1bf8ce5 +Interlock detects when the service is available and publishes it. Once tasks are running +and the proxy service is updated, the application should be available via `http://demo.local`. Open +two instances of your browser and text should be displayed on both instances as you type. diff --git a/ee/ucp/kubernetes/create-service-account.md b/ee/ucp/kubernetes/create-service-account.md index 3e7336fd62..ac2c5ffa17 100644 --- a/ee/ucp/kubernetes/create-service-account.md +++ b/ee/ucp/kubernetes/create-service-account.md @@ -9,7 +9,7 @@ A service account represents an identity for processes that run in a pod. When a process is authenticated through a service account, it can contact the API server and access cluster resources. If a pod doesn't have an assigned service account, it gets the `default` service account. -Learn about [managing service accounts](https://v1-8.docs.kubernetes.io/docs/admin/service-accounts-admin/). +Learn about [managing service accounts](https://v1-11.docs.kubernetes.io/docs/admin/service-accounts-admin/). 
In Docker EE, you give a service account access to cluster resources by creating a grant, the same way that you would give access to a user or a team. @@ -86,4 +86,4 @@ assigned to the `nginx` namespace. ## Where to go next -- [Deploy an ingress controller for a Kubernetes app](deploy-ingress-controller.md) \ No newline at end of file +- [Deploy an ingress controller for a Kubernetes app](deploy-ingress-controller.md) diff --git a/ee/ucp/kubernetes/deploy-with-compose.md b/ee/ucp/kubernetes/deploy-with-compose.md index 406ec19207..e75aa8e656 100644 --- a/ee/ucp/kubernetes/deploy-with-compose.md +++ b/ee/ucp/kubernetes/deploy-with-compose.md @@ -16,13 +16,13 @@ true Kubernetes app. To deploy a stack to Kubernetes, you need a namespace for the app's resources. Contact your Docker EE administrator to get access to a namespace. In this -example, the namespace has the name `lab-words`. -[Learn to grant access to a Kubernetes namespace](../authorization/grant-permissions/#kubernetes-grants). +example, the namespace is called `labs`. +[Learn how to grant access to a Kubernetes namespace](../authorization/grant-permissions/#kubernetes-grants). ## Create a Kubernetes app from a Compose file In this example, you create a simple app, named "lab-words", by using a Compose -file. The following yaml defines the stack: +file. This assumes you are deploying onto a cloud infrastructure. The following YAML defines the stack: ```yaml version: '3.3' @@ -30,63 +30,47 @@ version: '3.3' services: web: build: web - image: dockerdemos/lab-web - volumes: - - "./web/static:/static" + image: dockersamples/k8s-wordsmith-web ports: - - "80:80" + - "8080:80" words: build: words - image: dockerdemos/lab-words + image: dockersamples/k8s-wordsmith-api deploy: replicas: 5 - endpoint_mode: dnsrr - resources: - limits: - memory: 16M - reservations: - memory: 16M db: build: db - image: dockerdemos/lab-db + image: dockersamples/k8s-wordsmith-db ``` -1. 
Open the UCP web UI, and in the left pane, click **Shared resources**. -2. Click **Stacks**, and in the **Stacks** page, click **Create stack**. -3. In the **Name** textbox, type "lab-words". -4. In the **Mode** dropdown, select **Kubernetes workloads**. -5. In the **Namespace** drowdown, select **lab-words**. -6. In the **docker-compose.yml** editor, paste the previous YAML. -7. Click **Create** to deploy the stack. +1. In your browser, log in to `https://`. Navigate to **Shared Resources > Stacks**. +2. Click **Create Stack** to open up the "Create Application" page. +3. Under "Configure Application", type "lab-words" for the application name. +4. Select **Kubernetes Workloads** for **Orchestrator Mode**. +5. In the **Namespace** dropdown, select "labs". +6. Under "Application File Mode", leave **Compose File** selected and click **Next**. +7. Paste the previous YAML, then click **Create** to deploy the stack. + ![](../images/deploy-compose-kubernetes-0.png){: .with-border} + + ## Inspect the deployment After a few minutes have passed, all of the pods in the `lab-words` deployment are running. -1. In the left pane, click **Pods**. Confirm that there are seven pods and - that their status is **Running**. If any have a status of **Pending**, - wait until they're all running. -2. Click one of the pods that has a name starting with **words**, and in the - details pane, scroll down to the **Pod IP** to view the pod's internal IP - address. - - ![](../images/deploy-compose-kubernetes-1.png){: .with-border} - -3. In the left pane, click **Load balancers** and find the **web-published** service. -4. Click the **web-published** service, and in the details pane, scroll down to the - **Spec** section. -5. Under **Ports**, click the URL to open the web UI for the `lab-words` app. +1. Navigate to **Kubernetes > Pods**. Confirm that there are seven pods and + that their status is **Running**. If any pod has a status of **Pending**, + wait until every pod is running. +2. 
Next, select **Kubernetes > Load balancers** and find the **web-published** service. +4. Click the **web-published** service, and scroll down to the + **Ports** section. +5. Under **Ports**, grab the Node Port information. ![](../images/deploy-compose-kubernetes-2.png){: .with-border} -6. Look at the IP addresses that are displayed in each tile. The IP address - of the pod you inspected previously may be listed. If it's not, refresh the - page until you see it. +6. In a new tab or window, enter your cloud instance public IP Address and append `:` from the previous step. For example, to find the public IP address of an EC2 instance, refer to [Amazon EC2 Instance IP Addressing](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/using-instance-addressing.html#concepts-public-addresses). The app is displayed. ![](../images/deploy-compose-kubernetes-3.png){: .with-border} - -7. Refresh the page to see how the load is balanced across the pods. - diff --git a/ee/ucp/kubernetes/index.md b/ee/ucp/kubernetes/index.md index d9081a790e..37c82117be 100644 --- a/ee/ucp/kubernetes/index.md +++ b/ee/ucp/kubernetes/index.md @@ -152,7 +152,7 @@ spec: ## Use the CLI to deploy Kubernetes objects With Docker EE, you deploy your Kubernetes objects on the command line by using -`kubectl`. [Install and set up kubectl](https://v1-8.docs.kubernetes.io/docs/tasks/tools/install-kubectl/). +`kubectl`. [Install and set up kubectl](https://v1-11.docs.kubernetes.io/docs/tasks/tools/install-kubectl/). Use a client bundle to configure your client tools, like Docker CLI and `kubectl` to communicate with UCP instead of the local deployments you might have running. 
diff --git a/ee/ucp/kubernetes/install-cni-plugin.md b/ee/ucp/kubernetes/install-cni-plugin.md index ee7c856d0d..b3b7e024ac 100644 --- a/ee/ucp/kubernetes/install-cni-plugin.md +++ b/ee/ucp/kubernetes/install-cni-plugin.md @@ -7,7 +7,7 @@ keywords: ucp, cli, administration, kubectl, Kubernetes, cni, Container Networki For Docker Universal Control Plane, [Project Calico](https://docs.projectcalico.org/v3.0/introduction/) provides the secure networking functionality for the container communication with Kubernetes. -UCP supports certified third-party Container Networking Interface (CNI) plugins. Docker EE installs the +Docker EE supports Calico and installs the built-in [Calico](https://github.com/projectcalico/cni-plugin) plugin, but you can override that and install a Docker certified plugin. diff --git a/ee/ucp/kubernetes/layer-7-routing.md b/ee/ucp/kubernetes/layer-7-routing.md index d12a15a961..25808e6f5f 100644 --- a/ee/ucp/kubernetes/layer-7-routing.md +++ b/ee/ucp/kubernetes/layer-7-routing.md @@ -1,7 +1,6 @@ --- title: Layer 7 routing -description: Learn how to route traffic to your Kubernetes workloads in - Docker Enterprise Edition. +description: Learn how to route traffic to your Kubernetes workloads in Docker Enterprise Edition. keywords: UCP, Kubernetes, ingress, routing redirect_from: - /ee/ucp/kubernetes/deploy-ingress-controller/ @@ -19,285 +18,8 @@ Use an ingress controller when you want to: * Give your Kubernetes app an externally-reachable URL. * Load-balance traffic to your app. -Kubernetes provides an NGINX ingress controller that you can use in Docker EE -without modifications. -Learn about [ingress in Kubernetes](https://v1-8.docs.kubernetes.io/docs/concepts/services-networking/ingress/). +A popular ingress controller within the Kubernetes Community is the [NGINX controller](https://github.com/kubernetes/ingress-nginx), and can be used in Docker Enterprise Edition, but it is not directly supported by Docker, Inc. 
-## Create a dedicated namespace +Learn about [ingress in Kubernetes](https://v1-11.docs.kubernetes.io/docs/concepts/services-networking/ingress/). -1. Navigate to the **Namespaces** page, and click **Create**. -2. In the **Object YAML** editor, append the following text. - ```yaml - metadata: - name: ingress-nginx - ``` - - The finished YAML should look like this. - - ```yaml - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - ``` -3. Click **Create**. -4. In the **ingress-nginx** namespace, click the **More options** icon, - and in the context menu, select **Set Context**. - - ![](../images/deploy-ingress-controller-1.png){: .with-border} - -## Create a grant - -The default service account that's associated with the `ingress-nginx` -namespace needs access to Kubernetes resources, so create a grant with -`Restricted Control` permissions. - -1. From UCP, navigate to the **Grants** page, and click **Create Grant**. -2. Within the **Subject** pane, select **Service Account**. For the - **Namespace** select **ingress-nginx**, and select **default** for - the **Service Account**. Click **Next**. -3. Within the **Role** pane, select **Restricted Control**, and then click - **Next**. -4. Within the **Resource Set** pane, select the **Type** **Namespace**, and - select the **Apply grant to all existing and new namespaces** toggle. -5. Click **Create**. - -## Deploy NGINX ingress controller - -The cluster is ready for the ingress controller deployment, which has three -main components: - -- a simple HTTP server, named `default-http-backend`, -- an ingress controller, named `nginx-ingress-controller`, and -- a service that exposes the app, named `ingress-nginx`. - -Navigate to the **Create Kubernetes Object** page, and in the **Object YAML** -editor, paste the following YAML. 
- -```yaml -apiVersion: apps/v1beta2 -kind: Deployment -metadata: - name: default-http-backend - labels: - app: default-http-backend - namespace: ingress-nginx -spec: - replicas: 1 - selector: - matchLabels: - app: default-http-backend - template: - metadata: - labels: - app: default-http-backend - annotations: - seccomp.security.alpha.kubernetes.io/pod: docker/default - spec: - terminationGracePeriodSeconds: 60 - containers: - - name: default-http-backend - # Any image is permissible as long as: - # 1. It serves a 404 page at / - # 2. It serves 200 on a /healthz endpoint - image: gcr.io/google_containers/defaultbackend:1.4 - livenessProbe: - httpGet: - path: /healthz - port: 8080 - scheme: HTTP - initialDelaySeconds: 30 - timeoutSeconds: 5 - ports: - - containerPort: 8080 - resources: - limits: - cpu: 10m - memory: 20Mi - requests: - cpu: 10m - memory: 20Mi ---- -apiVersion: v1 -kind: Service -metadata: - name: default-http-backend - namespace: ingress-nginx - labels: - app: default-http-backend -spec: - ports: - - port: 80 - targetPort: 8080 - selector: - app: default-http-backend ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: nginx-configuration - namespace: ingress-nginx - labels: - app: ingress-nginx ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: tcp-services - namespace: ingress-nginx ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: udp-services - namespace: ingress-nginx ---- -apiVersion: apps/v1beta2 -kind: Deployment -metadata: - name: nginx-ingress-controller - namespace: ingress-nginx -spec: - replicas: 1 - selector: - matchLabels: - app: ingress-nginx - template: - metadata: - labels: - app: ingress-nginx - annotations: - prometheus.io/port: '10254' - prometheus.io/scrape: 'true' - seccomp.security.alpha.kubernetes.io/pod: docker/default - spec: - initContainers: - - command: - - sh - - -c - - sysctl -w net.core.somaxconn=32768; sysctl -w net.ipv4.ip_local_port_range="1024 65535" - image: alpine:3.6 - imagePullPolicy: 
IfNotPresent - name: sysctl - securityContext: - privileged: true - containers: - - name: nginx-ingress-controller - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.20.0 - args: - - /nginx-ingress-controller - - --default-backend-service=$(POD_NAMESPACE)/default-http-backend - - --configmap=$(POD_NAMESPACE)/nginx-configuration - - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services - - --udp-services-configmap=$(POD_NAMESPACE)/udp-services - - --annotations-prefix=nginx.ingress.kubernetes.io - - --enable-ssl-passthrough - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - ports: - - name: http - containerPort: 80 - - name: https - containerPort: 443 - livenessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - readinessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 ---- -apiVersion: v1 -kind: Service -metadata: - name: ingress-nginx - namespace: ingress-nginx -spec: - type: NodePort - ports: - - name: http - port: 80 - targetPort: 80 - protocol: TCP - - name: https - port: 443 - targetPort: 443 - protocol: TCP - selector: - app: ingress-nginx -``` - -## Check your deployment - -The `default-http-backend` provides a simple service that serves a 404 page -at `/` and serves 200 on the `/healthz` endpoint. - -1. Navigate to the **Controllers** page and confirm that the - **default-http-backend** and **nginx-ingress-controller** objects are - scheduled. - - > Scheduling latency - > - > It may take several seconds for the HTTP backend and the ingress controller's - > `Deployment` and `ReplicaSet` objects to be scheduled. - {: .important} - - ![](../images/deploy-ingress-controller-2.png){: .with-border} - -2. 
When the workload is running, navigate to the **Load Balancers** page - and click the **ingress-nginx** service. - - ![](../images/deploy-ingress-controller-3.png){: .with-border} - -3. In the details pane, click the first URL in the **Ports** section. - - A new page opens, displaying `default backend - 404`. - -## Check your deployment from the CLI - -From the command line, confirm that the deployment is running by using -`curl` with the URL that's shown on the details pane of the **ingress-nginx** -service. - -```bash -curl -I http://:/ -``` - -This command returns the following result. - -``` -HTTP/1.1 404 Not Found -Server: nginx/1.13.8 -``` - -Test the server's health ping service by appending `/healthz` to the URL. - -```bash -curl -I http://:/healthz -``` - -This command returns the following result. - -``` -HTTP/1.1 200 OK -Server: nginx/1.13.8 -``` +For an example of a YAML NGINX kube ingress deployment, refer to . diff --git a/ee/ucp/kubernetes/configure-aws-storage.md b/ee/ucp/kubernetes/storage/configure-aws-storage.md similarity index 95% rename from ee/ucp/kubernetes/configure-aws-storage.md rename to ee/ucp/kubernetes/storage/configure-aws-storage.md index 179edc536c..b13117b31c 100644 --- a/ee/ucp/kubernetes/configure-aws-storage.md +++ b/ee/ucp/kubernetes/storage/configure-aws-storage.md @@ -2,6 +2,8 @@ title: Configure AWS EBS Storage for Kubernetes description: Learn how configure AWS EBS storage for Kubernetes clusters. keywords: UCP, Docker Enterprise, Kubernetes, storage, AWS, ELB +redirect_from: +- /ee/ucp/kubernetes/configure-aws-storage/ --- [AWS Elastic Block Store](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html) (EBS) can be deployed with Kubernetes in Docker Enterprise 2.1 to use AWS volumes as peristent storage for applications. Before using EBS volumes, configure UCP and the AWS infrastructure for storage orchestration to function. 
@@ -131,3 +133,8 @@ pvc-751c006e-a00b-11e8-8007-0242ac110012 1Gi RWO Retain The AWS console shows a volume has been provisioned having a matching name with type `gp2` and a `1GiB` size. ![](../images/aws-ebs.png) + +## Where to go next + +- [Deploy an Ingress Controller on Kubernetes](/ee/ucp/kubernetes/layer-7-routing/) +- [Discover Network Encryption on Kubernetes](/ee/ucp/kubernetes/kubernetes-network-encryption/) \ No newline at end of file diff --git a/ee/ucp/kubernetes/storage/use-nfs-volumes.md b/ee/ucp/kubernetes/storage/use-nfs-volumes.md new file mode 100644 index 0000000000..f2561a1ca7 --- /dev/null +++ b/ee/ucp/kubernetes/storage/use-nfs-volumes.md @@ -0,0 +1,257 @@ +--- +title: Configuring NFS Storage for Kubernetes +description: Learn how to add support for NFS persistent storage by adding a default storage class. +keywords: Universal Control Plane, UCP, Docker EE, Kubernetes, storage, volume +redirect_from: +- /ee/ucp/admin/configure/use-nfs-volumes/ +--- + +Users can provide persistent storage for workloads running on Docker Enterprise +by using NFS storage. These NFS shares, when mounted into the running container, +provide state to the application, managing data external to the container's +lifecycle. + +> Note: Provisioning an NFS server and exporting an NFS share are out of scope +> for this guide. Additionally, using external [Kubernetes +> plugins](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs) +> to dynamically provision NFS shares, is also out of scope for this guide. + +To mount existing NFS shares within Kubernetes Pods, we have 2 options: + - Define NFS shares within the Pod definitions. NFS shares are defined + manually by each tenant when creating a workload. + - Define NFS shares as a Cluster object through Persistent Volumes, with + the Cluster object lifecycle handled separately from the workload. This is common for + operators who want to define a range of NFS shares for tenants to request and + consume. 
+
+## Defining NFS Shares in the Pod definition
+
+When defining workloads in Kubernetes manifest files, an end user can directly
+reference the NFS shares to mount inside of each Pod. The NFS share is defined
+within the Pod specification, which could be a standalone pod, or could be
+wrapped in a higher-level object like a Deployment, Daemonset, or StatefulSet.
+
+The following example assumes a running UCP cluster and a downloaded
+[client bundle](../../user-access/cli/#download-client-certificates) with
+permission to schedule pods in a namespace.
+
+Here is an example pod specification with an NFS volume defined:
+
+```bash
+$ cat nfs-in-a-pod.yaml
+kind: Pod
+apiVersion: v1
+metadata:
+  name: nfs-in-a-pod
+spec:
+  containers:
+    - name: app
+      image: alpine
+      volumeMounts:
+        - name: nfs-volume
+          mountPath: /var/nfs # Please change the destination you'd like the share to be mounted to
+      command: ["/bin/sh"]
+      args: ["-c", "sleep 500000"]
+  volumes:
+    - name: nfs-volume
+      nfs:
+        server: nfs.example.com # Please change this to your NFS server
+        path: /share1 # Please change this to the relevant share
+```
+
+To deploy the pod, and ensure that it started up correctly, use the [kubectl](../../user-access/kubectl/) command line tool.
+
+```bash
+$ kubectl create -f nfs-in-a-pod.yaml
+
+$ kubectl get pods
+NAME           READY     STATUS    RESTARTS   AGE
+nfs-in-a-pod   1/1       Running   0          6m
+```
+
+Verify everything was mounted correctly by getting a shell prompt
+within the container and searching for your mount.
+
+```bash
+$ kubectl exec -it nfs-in-a-pod sh
+/ #
+/ # mount | grep nfs.example.com
+nfs.example.com://share1 on /var/nfs type nfs4 (rw,relatime,vers=4.0,rsize=262144,wsize=262144,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=172.31.42.23,local_lock=none,addr=nfs.example.com)
+/ #
+```
+
+Because you defined the NFS share as part of the Pod spec, neither UCP nor Kubernetes
+knows anything about this NFS share.
This means that when the pod gets
+deleted, the NFS share is detached from the cluster. However, the data remains in the NFS share.
+
+## Exposing NFS shares as a Cluster Object
+
+For this method, use the Kubernetes Objects [Persistent
+Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes)
+and [Persistent Volume
+Claims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
+to manage the lifecycle and access to NFS Shares.
+
+Here you can define multiple shares for a tenant to use within the
+cluster. The [Persistent
+Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes)
+is a cluster-wide object, so it can be pre-provisioned. A
+[Persistent Volume
+Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
+is a claim by a tenant for use of a Persistent Volume within their namespace.
+
+> Note: In this case, 'NFS share lifecycle' is referring to granting and removing the
+> end user's ability to consume NFS storage, not managing the lifecycle
+> of the NFS Server.
+
+### Persistent Volume
+
+Define the Persistent Volume at the cluster level:
+
+```bash
+$ cat pvwithnfs.yaml
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: my-nfs-share
+spec:
+  capacity:
+    storage: 5Gi # This size is used to match a volume to a tenant's claim
+  accessModes:
+    - ReadWriteOnce # Access modes are defined below
+  persistentVolumeReclaimPolicy: Recycle # Reclaim policies are defined below
+  nfs:
+    server: nfs.example.com # Please change this to your NFS server
+    path: /share1 # Please change this to the relevant share
+```
+
+To create Persistent Volume objects at the Cluster level, you need a [Cluster
+Role
+Binding](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding)
+grant.
Again use the [kubectl](../../user-access/kubectl/) command line tool to create the
+volume:
+
+```
+$ kubectl create -f pvwithnfs.yaml
+
+$ kubectl get pv
+NAME           CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON    AGE
+
+my-nfs-share   5Gi        RWO            Recycle          Available           slow                     7s
+```
+
+#### Access Modes
+
+The access mode for an NFS Persistent Volume can be any of the following modes:
+
+- ***ReadWriteOnce*** – the volume can be mounted as read-write by a single node.
+- ***ReadOnlyMany*** – the volume can be mounted read-only by many nodes.
+- ***ReadWriteMany*** – the volume can be mounted as read-write by many nodes.
+
+The access mode in the Persistent Volume definition is used to match a
+Persistent Volume to a Claim. When a Persistent Volume is defined and created
+inside of Kubernetes, a Volume is not mounted. See [access
+modes in the Kubernetes documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes)
+for more details.
+
+#### Reclaim
+
+The [reclaim
+policy](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming)
+is used to define what the cluster should do after a Persistent Volume has been
+released from a Claim. A Persistent Volume Reclaim policy could be: Retain,
+Recycle, or Delete. See [Reclaiming in the Kubernetes
+documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming)
+for a deeper understanding.
+
+### Persistent Volume Claim
+
+A tenant can now "claim" a Persistent Volume for use within their workloads
+by using a Kubernetes Persistent Volume Claim. A Persistent Volume Claim resides within a namespace,
+and it attempts to match available Persistent Volumes
+to what a tenant has requested.
+
+``` bash
+$ cat myapp-claim.yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: myapp-nfs
+  namespace: default
+spec:
+  accessModes:
+    - ReadWriteOnce # Access modes for volumes are defined under Persistent Volumes
+  resources:
+    requests:
+      storage: 5Gi # volume size requested
+```
+
+A tenant with a
+[RoleBinding](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding)
+to create Persistent Volume Claims can deploy this Persistent
+Volume Claim. If there is a Persistent Volume that meets the tenant's
+criteria, Kubernetes binds the Persistent Volume to the Claim. Again, this does not mount the share.
+
+```bash
+$ kubectl create -f myapp-claim.yaml
+persistentvolumeclaim "myapp-nfs" created
+
+$ kubectl get pvc
+NAME        STATUS    VOLUME         CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+myapp-nfs   Bound     my-nfs-share   5Gi        RWO            slow           2s
+
+$ kubectl get pv
+NAME           CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS    CLAIM               STORAGECLASS   REASON    AGE
+my-nfs-share   5Gi        RWO            Recycle          Bound     default/myapp-nfs   slow           4m
+```
+
+### Defining a workload
+
+Finally, a tenant can deploy a workload to consume the Persistent Volume Claim.
+The Persistent Volume Claim is defined within the Pod specification, which could
+be a standalone pod or could be wrapped in a higher-level object like a
+Deployment, Daemonset, or StatefulSet.
+
+```bash
+$ cat myapp-pod.yaml
+kind: Pod
+apiVersion: v1
+metadata:
+  name: pod-using-nfs
+spec:
+  containers:
+    - name: app
+      image: alpine
+      volumeMounts:
+        - name: data
+          mountPath: /var/nfs # Please change the destination you'd like the share to be mounted to
+      command: ["/bin/sh"]
+      args: ["-c", "sleep 500000"]
+  volumes:
+    - name: data
+      persistentVolumeClaim:
+        claimName: myapp-nfs
+```
+
+The pod can be deployed by a tenant using the
+[kubectl](../../user-access/kubectl/) command line tool. Additionally, you can
+verify that the pod is running successfully and that the NFS share has been mounted
+inside of the container.
+
+```bash
+$ kubectl create -f myapp-pod.yaml
+
+$ kubectl get pod
+NAME            READY     STATUS    RESTARTS   AGE
+pod-using-nfs   1/1       Running   0          1m
+
+$ kubectl exec -it pod-using-nfs sh
+/ # mount | grep nfs.example.com
+nfs.example.com://share1 on /var/nfs type nfs4 (rw,relatime,vers=4.1,rsize=262144,wsize=262144,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=172.31.42.23,local_lock=none,addr=nfs.example.com)
+/ #
+```
+
+## Where to go next
+
+- [Deploy an Ingress Controller on Kubernetes](/ee/ucp/kubernetes/layer-7-routing/)
+- [Discover Network Encryption on Kubernetes](/ee/ucp/kubernetes/kubernetes-network-encryption/)
diff --git a/ee/ucp/release-notes.md b/ee/ucp/release-notes.md
index 7b3be9a3ad..d8b66cd54f 100644
--- a/ee/ucp/release-notes.md
+++ b/ee/ucp/release-notes.md
@@ -21,28 +21,168 @@ upgrade your installation to the latest release.
 
 # Version 3.1
 
+## 3.1.7
+(2019-05-06)
+
+### Security
+* Refer to [UCP image vulnerabilities](https://success.docker.com/article/ucp-image-vulnerabilities) for details regarding actions to be taken, timeline, and any status updates/issues/recommendations.
+
+### Bug Fixes
+* Updated the UCP base image layers to fix a number of old libraries and components that had security vulnerabilities.
+
+### Known Issues
+* Upgrading from UCP `3.1.4` to `3.1.5` causes missing Swarm placement constraints banner for some Swarm services (ENGORC-2191). This can cause Swarm services to run unexpectedly on Kubernetes nodes. See https://www.docker.com/ddc-41 for more information.
+  - Workaround: Delete any `ucp-*-s390x` Swarm services. For example, `ucp-auth-api-s390x`.
+* There are important changes to the upgrade process that, if not correctly followed, can impact the availability of applications running on the Swarm during upgrades. These constraints impact any upgrades coming from any Docker Engine version before 18.09 to version 18.09 or greater.
For more information about upgrading Docker Enterprise to version 2.1, see [Upgrade Docker](../upgrade).
+* To deploy Pods with containers using Restricted Parameters, the user must be an admin and a service account must explicitly have a **ClusterRoleBinding** with `cluster-admin` as the **ClusterRole**. Restricted Parameters on Containers include:
+  * Host Bind Mounts
+  * Privileged Mode
+  * Extra Capabilities
+  * Host Networking
+  * Host IPC
+  * Host PID
+* If you delete the built-in **ClusterRole** or **ClusterRoleBinding** for `cluster-admin`, restart the `ucp-kube-apiserver` container on any manager node to recreate them. (#14483)
+* Pod Security Policies are not supported in this release. (#15105)
+* The default Kubelet configuration for UCP Manager nodes is expecting 4GB of free disk space in the `/var` partition. See [System Requirements](/ee/ucp/admin/install/system-requirements) for details.
+
+### Components
+
+| Component | Version |
+| ----------- | ----------- |
+| UCP | 3.1.7 |
+| Kubernetes | 1.11.9 |
+| Calico | 3.5.3 |
+| Interlock (nginx) | 1.14.0 |
+
+## 3.1.6
+(2019-04-11)
+
+### Kubernetes
+* Updated Kubernetes to version 1.11.9.
+
+### Networking
+* Updated Calico to version 3.5.3.
+
+### Authentication and Authorization
+* Accessing the `ListAccount` API endpoint now requires an admin user. Accessing the `GetAccount` API endpoint now requires an admin user, the actual user, or a member of the organization being inspected. [ENGORC-100](https://docker.atlassian.net/browse/ENGORC-100)
+
+### Known Issues
+* Upgrading from UCP `3.1.4` to `3.1.5` causes missing Swarm placement constraints banner for some Swarm services (ENGORC-2191). This can cause Swarm services to run unexpectedly on Kubernetes nodes. See https://www.docker.com/ddc-41 for more information.
+  - Workaround: Delete any `ucp-*-s390x` Swarm services. For example, `ucp-auth-api-s390x`.
+* There are important changes to the upgrade process that, if not correctly followed, can impact the availability of applications running on the Swarm during uprades. These constraints impact any upgrades coming from any Docker Engine version before 18.09 to version 18.09 or greater. For more information about about upgrading Docker Enterprise to version 2.1, see [Upgrade Docker](../upgrade). +* To deploy Pods with containers using Restricted Parameters, the user must be an admin and a service account must explicitly have a **ClusterRoleBinding** with `cluster-admin` as the **ClusterRole**. Restricted Parameters on Containers include: + * Host Bind Mounts + * Privileged Mode + * Extra Capabilities + * Host Networking + * Host IPC + * Host PID +* If you delete the built-in **ClusterRole** or **ClusterRoleBinding** for `cluster-admin`, restart the `ucp-kube-apiserver` container on any manager node to recreate them. (#14483) +* Pod Security Policies are not supported in this release. (#15105) +* The default Kubelet configuration for UCP Manager nodes is expecting 4GB of free disk space in the `/var` partition. See [System Requirements](/ee/ucp/admin/install/system-requirements) for details. + +### Components + +| Component | Version | +| ----------- | ----------- | +| UCP | 3.1.6 | +| Kubernetes | 1.11.9 | +| Calico | 3.5.3 | +| Interlock (nginx) | 1.14.0 | + +## 3.1.5 +2019-03-28 + +### Kubernetes +* Updated Kubernetes to version 1.11.8. (ENGORC-2024) + +### Networking +* Updated Calico to version 3.5.2. (ENGORC-2045) + +### Authentication and Authorization +* Added LDAP Settings API to the list of publicly documented API endpoints. (ENGORC-98) +* Added a new `exclude_server_identity_headers` field to the UCP config. If set to true, the headers are not included in UCP API responses. (docker/orca#16039) +* Hid most of the UCP banners for non-admin users. 
(docker/orca#14631) +* When LDAP or SAML is enabled, provided admin users an option to disable managed password authentication, which includes login and creation of new users. (ENGORC-1999) + +### Bug Fixes +* Changed Interlock proxy service default `update-action-failure` to rollback. (ENGCORE-117) +* Added validation for service configuration label values. (ENGCORE-114) +* Fixed an issue with continuous interlock reconciliation if `ucp-interlock` service image does not match expected version. (ENGORC-2081) + +### Known Issues + +* Upgrading from UCP 3.1.4 to 3.1.5 causes missing Swarm placement constraints banner for some Swarm services (ENGORC-2191). This can cause Swarm services to run unexpectedly on Kubernetes nodes. See https://www.docker.com/ddc-41 for more information. + - Workaround: Delete any `ucp-*-s390x` Swarm services. For example, `ucp-auth-api-s390x`. +* There are important changes to the upgrade process that, if not correctly followed, can impact the availability of applications running on the Swarm during uprades. These constraints impact any upgrades coming from any Docker Engine version before 18.09 to version 18.09 or greater. For more information about about upgrading Docker Enterprise to version 2.1, see [Upgrade Docker](../upgrade) +* To deploy Pods with containers using Restricted Parameters, the user must be an admin and a service account must explicitly have a **ClusterRoleBinding** with `cluster-admin` as the **ClusterRole**. Restricted Parameters on Containers include: + * Host Bind Mounts + * Privileged Mode + * Extra Capabilities + * Host Networking + * Host IPC + * Host PID +* If you delete the built-in **ClusterRole** or **ClusterRoleBinding** for `cluster-admin`, restart the `ucp-kube-apiserver` container on any manager node to recreate them. (#14483) +* Pod Security Policies are not supported in this release. 
(#15105) +* The default Kubelet configuration for UCP Manager nodes is expecting 4GB of free disk space in the `/var` partition. See [System Requirements](/ee/ucp/admin/install/system-requirements) for details. + +### Components + +| Component | Version | +| ----------- | ----------- | +| UCP | 3.1.5 | +| Kubernetes | 1.11.8 | +| Calico | 3.5.2 | +| Interlock (nginx) | 1.14.0 | + ## 3.1.4 2019-02-28 -**New platforms** +### New platforms * Added support for SLES 15. * Added support for Oracle 7.6. - **Kubernetes** +### Kubernetes * Kubernetes has been updated to version 1.11.7. (docker/orca#16157) - **Bug Fixes** +### Bug Fixes * Bump the Golang version that is used to build UCP to version 1.10.8. (docker/orca#16068) * Fixed an issue that caused UCP upgrade failure to upgrade with Interlock deployment. (docker/orca#16009) * Fixed an issue that caused Windows node ucp-agent(s) to constantly reboot when audit logging is enabled. (docker/orca#16122) * Fixed an issue to ensure that non-admin user actions (with the RestrictedControl role) against RBAC resources are read-only. (docker/orca#16121) * Fixed an issue to prevent UCP users from updating services with a port that conflicts with the UCP controller port. (escalation#855) * Fixed an issue to validate Calico certs expiration dates and update accordingly. (escalation#981) +* Kubelet no longer deletes images, starting with the oldest unused images, after exceeding 85% disk space utilization. This was an issue in air-gapped environments. (docker/orca#16082) -**Enhancements** +### Enhancements * Changed packaging and builds for UCP to build bootstrapper last. This avoids the "upgrade available" banner on all UCPs until the entirety of UCP is available. +### Known Issues + +* Newly added Windows node reports "Awaiting healthy status in classic node inventory". [Learn more](https://success.docker.com/article/newly-added-windows-node-reports-awaiting-healthy-status-in-classic-node-inventory). 
+* There are important changes to the upgrade process that, if not correctly followed, can impact the availability of applications running on the Swarm during uprades. These constraints impact any upgrades coming from any Docker Engine version before 18.09 to version 18.09 or greater. For more information about about upgrading Docker Enterprise to version 2.1, see [Upgrade Docker](../upgrade) +* In the UCP web interface, LDAP settings disappear after submitting them. However, the settings are properly saved. (docker/orca#15503) +* To deploy Pods with containers using Restricted Parameters, the user must be an admin and a service account must explicitly have a **ClusterRoleBinding** with `cluster-admin` as the **ClusterRole**. Restricted Parameters on Containers include: + * Host Bind Mounts + * Privileged Mode + * Extra Capabilities + * Host Networking + * Host IPC + * Host PID +* If you delete the built-in **ClusterRole** or **ClusterRoleBinding** for `cluster-admin`, restart the `ucp-kube-apiserver` container on any manager node to recreate them. (docker/orca#14483) +* Pod Security Policies are not supported in this release. (docker/orca#15105) +* The default Kubelet configuration for UCP Manager nodes is expecting 4GB of free disk space in the `/var` partition. See [System Requirements](/ee/ucp/admin/install/system-requirements) for details. + +### Components + +| Component | Version | +| ----------- | ----------- | +| UCP | 3.1.4 | +| Kubernetes | 1.11.7 | +| Calico | 3.5.0 | +| Interlock (nginx) | 1.14.0 | + ## 3.1.3 2019-01-29 @@ -82,8 +222,7 @@ upgrade your installation to the latest release. ### Authentication and Authorization * SAML Single Logout is now supported in UCP. -* Identity Provider initiated SAML Single Sign-on is now supported in UCP. The admin can -enable this feature in Admin Settings -> SAML Settings. +* Identity Provider initiated SAML Single Sign-on is now supported in UCP. 
The admin can enable this feature in Admin Settings -> SAML Settings. ### Audit Logging * UCP Audit logging is now controlled through the UCP Configuration file; it is also @@ -91,16 +230,19 @@ now configurable within the UCP web interface. (#15466) ### Bug Fixes * Core - * Significantly reduced database load in environments with a lot of concurrent - and repeated API requests by the same user. (docker/escalation#911) + * Significantly reduced database load in environments with a lot of concurrent and repeated API requests by the same user. (docker/escalation#911) * UCP backend will now complain when a service is created/updated if the - `com.docker.lb.network` label is not correctly specified. (docker/orca#15015) + `com.docker.lb.network` label is not correctly specified. (docker/orca#15015) * LDAP group member attribute is now case insensitive. (docker/escalation#917) * Interlock * Interlock headers can now be hidden. (escalation#833) * Now upgrading Interlock will also upgrade interlock proxy and interlock extension as well (escalation/871) * Added support for 'VIP' backend mode, in which the Interlock proxy connects to the backend service's Virtual IP instead of load-balancing directly to each task IP. (docker/interlock#206) (escalation/920) +### Known Issues + * In the UCP web interface, LDAP settings disappear after submitting them. However, the settings are properly saved. (docker/orca#15503) + * By default, Kubelet begins deleting images, starting with the oldest unused images, after exceeding 85% disk space utilization. This causes an issue in an air-gapped environment. (docker/orca#16082) + ### Components | Component | Version | @@ -129,7 +271,7 @@ now configurable within the UCP web interface. 
(#15466) 2018-11-08 -## Bug Fixes +### Bug Fixes * Swarm placement constraint warning banner no longer shows up for `ucp-auth` services (#14539) * "update out of sequence" error messages no longer appear when changing admin settings (#7093) @@ -138,7 +280,7 @@ now configurable within the UCP web interface. (#15466) * `docker network ls --filter id=` now works with a UCP client bundle (#14840) * Collection deletes are correctly blocked if there is a node in the collection (#13704) -## New Features +### New Features ### Kubernetes @@ -168,34 +310,26 @@ Admins can configure UCP to use a SAML-enabled identity provider for user authen * UCP now stores its configurations in its internal key-value store instead of in a Swarm configuration so changes can propagate across the cluster more quickly. * You can now use the `custom_api_server_headers` field in the UCP configuration to set arbitrary headers that are included with every UCP response. - - -## API updates +### API updates There are several backward-incompatible changes in the Kubernetes API that may affect user workloads. They are: - * A compatibility issue with the `allowPrivilegeEscalation` field that caused policies to start denying pods they previously allowed was fixed. If you defined `PodSecurityPolicy` objects using a 1.8.0 client or server and set `allowPrivilegeEscalation` to false, these objects must be reapplied after you upgrade. - * These changes are automatically updated for taints. Tolerations for these taints must be updated manually. Specifically, you must: +* A compatibility issue with the `allowPrivilegeEscalation` field that caused policies to start denying pods they previously allowed was fixed. If you defined `PodSecurityPolicy` objects using a 1.8.0 client or server and set `allowPrivilegeEscalation` to false, these objects must be reapplied after you upgrade. +* These changes are automatically updated for taints. Tolerations for these taints must be updated manually. 
Specifically, you must: * Change `node.alpha.kubernetes.io/notReady` to `node.kubernetes.io/not-ready` * Change `node.alpha.kubernetes.io/unreachable` to `node.kubernetes.io/unreachable` For more information about taints and tolerations, see [Taints and Tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/). - * JSON configuration used with `kubectl create -f pod.json` containing fields with incorrect casing are no longer valid. You must correct these files before upgrading. When specifying keys in JSON resource definitions during direct API server communication, the keys are case-sensitive. A bug introduced in Kubernetes 1.8 caused the API server to accept a request with incorrect case and coerce it to correct case, but this behaviour has been fixed in 1.11 so the API server will again enforce correct casing. During this time, the `kubectl` tool continued to enforce case-sensitive keys, so users that strictly manage resources with `kubectl` will be unaffected by this change. * If you have a pod with a subpath volume PVC, there’s a chance that after the upgrade, it will conflict with some other pod; see [this pull request](https://github.com/kubernetes/kubernetes/pull/61373). It’s not clear if this issue will just prevent those pods from starting or if the whole cluster will fail. - - -## Known issues +### Known issues * There are important changes to the upgrade process that, if not correctly followed, can impact the availability of applications running on the Swarm during uprades. These constraints impact any upgrades coming from any Docker Engine version before 18.09 to version 18.09 or greater. For more information about about upgrading Docker Enterprise to version 2.1, see [Upgrade Docker](../upgrade) - * In the UCP web interface, LDAP settings disappear after submitting them. However, the settings are properly saved. 
(#15503) - * You must use the ID of the user, organization, or team if you manually create a **ClusterRoleBinding** or **RoleBinding** for `User` or `Group` subjects. (#14935) * For the `User` subject Kind, the `Name` field contains the ID of the user. * For the `Group` subject Kind, the format depends on whether you are create a Binding for a team or an organization: * For an organization, the format is `org:{org-id}` * For a team, the format is `team:{org-id}:{team-id}` - * To deploy Pods with containers using Restricted Parameters, the user must be an admin and a service account must explicitly have a **ClusterRoleBinding** with `cluster-admin` as the **ClusterRole**. Restricted Parameters on Containers include: * Host Bind Mounts * Privileged Mode @@ -203,14 +337,11 @@ There are several backward-incompatible changes in the Kubernetes API that may a * Host Networking * Host IPC * Host PID - * If you delete the built-in **ClusterRole** or **ClusterRoleBinding** for `cluster-admin`, restart the `ucp-kube-apiserver` container on any manager node to recreate them. (#14483) - * Pod Security Policies are not supported in this release. (#15105) - * The default Kubelet configuration for UCP Manager nodes is expecting 4GB of free disk space in the `/var` partition. See [System Requirements](/ee/ucp/admin/install/system-requirements) for details. -## Deprecated features +### Deprecated features The following features are deprecated in UCP 3.1. @@ -234,16 +365,41 @@ The following features are deprecated in UCP 3.1. # Version 3.0 +## 3.0.11 + +2019-05-06 + +### Bug Fixes +* Updated the UCP base image layers to fix a number of old libraries and components that had security vulnerabilities. 
+ +### Components + +| Component | Version | +| ----------- | ----------- | +| UCP | 3.0.11 | +| Kubernetes | 1.8.15 | +| Calico | 3.0.8 | +| Interlock (nginx) | 1.13.12 | + ## 3.0.10 2019-02-28 - **Bug Fixes** +### Bug Fixes * Bump the Golang version that is used to build UCP to version 1.10.8. * Prevent UCP users from updating services with a port that conflicts with the UCP controller port. (escalation#855) * Fixed an issue that causes UCP fail to upgrade with Interlock deployment. (docker/orca/#16009) * Validate Calico certs expiration date and update accordingly. (escalation#981) +### Components + +| Component | Version | +| ----------- | ----------- | +| UCP | 3.0.10 | +| Kubernetes | 1.8.15 | +| Calico | 3.0.8 | +| Interlock (nginx) | 1.13.12 | + ## 3.0.9 2018-01-29 @@ -381,8 +537,7 @@ The following features are deprecated in UCP 3.1. ### Bug fixes * Security - * Fixed a critical security issue where the LDAP bind username and password - were stored in cleartext on UCP hosts. Please refer to [this KB article](https://success.docker.com/article/upgrading-to-ucp-2-2-12-ucp-3-0-4/) for proper implementation of this fix. + * Fixed a critical security issue where the LDAP bind username and password were stored in cleartext on UCP hosts. Please refer to [this KB article](https://success.docker.com/article/upgrading-to-ucp-2-2-12-ucp-3-0-4/) for proper implementation of this fix. ### Known Issue @@ -547,7 +702,7 @@ potential resource contention issues. a UCP client bundle and `kubectl`. [Learn more](https://docs.docker.com/ee/ucp/kubernetes/). * Users can now use Compose to deploy Kubernetes workloads from the web UI. -[Lean more](https://docs.docker.com/ee/ucp/kubernetes/deploy-with-compose/). +[Learn more](https://docs.docker.com/ee/ucp/kubernetes/deploy-with-compose/). ### Networking @@ -579,7 +734,7 @@ will be available in future releases. ### Security * Role-based access control now supports Kubernetes resources. 
-[Lean more](https://docs.docker.com/ee/ucp/authorization/migrate-kubernetes-roles/). +[Learn more](https://docs.docker.com/ee/ucp/authorization/migrate-kubernetes-roles/). * In addition to users, teams, organizations, and grants you can now use Kubernetes Service Accounts as a subject type. [Learn more](https://docs.docker.com/ee/ucp/kubernetes/create-service-account/). @@ -684,11 +839,35 @@ deprecated. Deploy your applications as Swarm services or Kubernetes workloads. # Version 2.2 +## Version 2.2.18 + +2019-05-06 + +### Bug Fixes +* Updated the UCP base image layers to fix a number of old libraries and components that had security vulnerabilities. + +### Known issues + +* Docker currently has limitations related to overlay networking and services using VIP-based endpoints. These limitations apply to use of the HTTP Routing Mesh (HRM). HRM users should familiarize themselves with these limitations. In particular, HRM may encounter virtual IP exhaustion (as evidenced by `failed to allocate network IP for task` Docker log messages). If this happens, and if the HRM service is restarted or rescheduled for any reason, HRM may fail to resume operation automatically. See the Docker EE 17.06-ee5 release notes for details. +* The Swarm admin web interface for UCP versions 2.2.0 and later contain a bug. If used with Docker Engine version 17.06.2-ee5 or earlier, attempting to update "Task History Limit", "Heartbeat Period" and "Node Certificate Expiry" settings using the UI will cause the cluster to crash on next restart. Using UCP 2.2.X and Docker Engine 17.06-ee6 and later, updating these settings will fail (but not cause the cluster to crash). Users are encouraged to update to Docker Engine version 17.06.2-ee6 and later, and to use the Docker CLI (instead of the UCP UI) to update these settings. Rotating join tokens works with any combination of Docker Engine and UCP versions. 
Docker Engine versions 17.03 and earlier (which use UCP version 2.1 and earlier) are not affected by this problem. +* Upgrading heterogeneous swarms from CLI may fail because x86 images are used +instead of the correct image for the worker architecture. +* Agent container log is empty even though it's running correctly. +* Rapid UI settings updates may cause unintended settings changes for logging + settings and other admin settings. +* Attempting to load an (unsupported) `tar.gz` image results in a poor error + message. +* Searching for images in the UCP images UI doesn't work. +* Removing a stack may leave orphaned volumes. +* Storage metrics are not available for Windows. +* You can't create a bridge network from the web interface. As a workaround use + `/`. + ## Version 2.2.17 2019-02-28 - **Bug Fixes** +### Bug Fixes * Bump the Golang version that is used to build UCP to version 1.10.8. * Prevent UCP users from updating services with a port that conflicts with the UCP controller port. (escalation#855) diff --git a/ee/ucp/ucp-architecture.md b/ee/ucp/ucp-architecture.md index 110e4b649f..0cd2b314a4 100644 --- a/ee/ucp/ucp-architecture.md +++ b/ee/ucp/ucp-architecture.md @@ -68,8 +68,8 @@ on a node depend on whether the node is a manager or a worker. Internally, UCP uses the following components: -* Calico v3.5 -* Kubernetes v1.11.5 +* Calico v3.5.3 +* Kubernetes v1.11.9 ### UCP components in manager nodes @@ -87,7 +87,7 @@ persist the state of UCP. These are the UCP services running on manager nodes: | k8s_POD_kube-dns | Pause container for the `kube-dns` pod. | | k8s_ucp-dnsmasq-nanny | A dnsmasq instance used in the Kubernetes DNS Service. Part of the `kube-dns` deployment. Runs on one manager node only. | | k8s_ucp-kube-compose | A custom Kubernetes resource component that's responsible for translating Compose files into Kubernetes constructs. Part of the `compose` deployment. Runs on one manager node only. 
| -| k8s_ucp-kube-dns | The main Kubernetes DNS Service, used by pods to [resolve service names](https://v1-8.docs.kubernetes.io/docs/concepts/services-networking/dns-pod-service/). Part of the `kube-dns` deployment. Runs on one manager node only. Provides service discovery for Kubernetes services and pods. A set of three containers deployed via Kubernetes as a single pod. | +| k8s_ucp-kube-dns | The main Kubernetes DNS Service, used by pods to [resolve service names](https://v1-11.docs.kubernetes.io/docs/concepts/services-networking/dns-pod-service/). Part of the `kube-dns` deployment. Runs on one manager node only. Provides service discovery for Kubernetes services and pods. A set of three containers deployed via Kubernetes as a single pod. | | k8s_ucp-kubedns-sidecar | Health checking and metrics daemon of the Kubernetes DNS Service. Part of the `kube-dns` deployment. Runs on one manager node only. | | ucp-agent | Monitors the node and ensures the right UCP services are running. | | ucp-auth-api | The centralized service for identity and authentication used by UCP and DTR. | diff --git a/ee/ucp/user-access/cli.md b/ee/ucp/user-access/cli.md index 4d52ade24d..afcfcfc78f 100644 --- a/ee/ucp/user-access/cli.md +++ b/ee/ucp/user-access/cli.md @@ -3,7 +3,6 @@ title: CLI-based access description: Learn how to access Docker Universal Control Plane from the CLI. keywords: ucp, cli, administration redirect_from: - - /datacenter/ucp/3.0/guides/user/access-ucp/cli-based-access/ - /ee/ucp/user/access-ucp/cli-based-access/ --- diff --git a/ee/ucp/user-access/index.md b/ee/ucp/user-access/index.md index 8d9dfff61e..f20b9534d6 100644 --- a/ee/ucp/user-access/index.md +++ b/ee/ucp/user-access/index.md @@ -13,7 +13,7 @@ way, from your browser. Docker UCP secures your cluster by using -[role-based access control](../../access-control/index.md). +[role-based access control](../authorization/index.md). 
From the browser, administrators can: * Manage cluster configurations, @@ -30,4 +30,4 @@ containers, and only when they're granted access by an administrator. ## Where to go next - [Authorization](../authorization.md) -- [Access UCP from the CLI](cli.md) \ No newline at end of file +- [Access UCP from the CLI](cli.md) diff --git a/ee/ucp/user-access/kubectl.md b/ee/ucp/user-access/kubectl.md index defebb992e..c2ff249d3d 100644 --- a/ee/ucp/user-access/kubectl.md +++ b/ee/ucp/user-access/kubectl.md @@ -83,7 +83,7 @@ You can download the binary from this [link](https://storage.googleapis.com/kube If you have curl installed on your system, you use these commands in Powershell. ```cmd -$env:k8sversion = "v1.8.11" +$env:k8sversion = "v1.11.5" curl https://storage.googleapis.com/kubernetes-release/release/$env:k8sversion/bin/windows/amd64/kubectl.exe ``` diff --git a/ee/upgrade.md b/ee/upgrade.md index 19f1c4cd60..28f0120f75 100644 --- a/ee/upgrade.md +++ b/ee/upgrade.md @@ -150,7 +150,7 @@ With an exhausted network, you can triage it using the following steps. 1. SSH into a manager node on a cluster where your applications are running. -2. Check the `docker service ls` output. It will diplay the service that is unable to completely fill all its replicas such as: +2. Check the `docker service ls` output. It will display the service that is unable to completely fill all its replicas such as: ``` ID NAME MODE REPLICAS IMAGE PORTS diff --git a/engine/examples/postgresql_service.md b/engine/examples/postgresql_service.md index 6d3265bc7c..0ff7604200 100644 --- a/engine/examples/postgresql_service.md +++ b/engine/examples/postgresql_service.md @@ -21,7 +21,7 @@ suitably secure. # example Dockerfile for https://docs.docker.com/engine/examples/postgresql_service/ # -FROM ubuntu +FROM ubuntu:16.04 # Add the PostgreSQL PGP key to verify their Debian packages. 
# It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc diff --git a/engine/examples/running_ssh_service.md b/engine/examples/running_ssh_service.md index 7cb4c50d86..96ce7072d2 100644 --- a/engine/examples/running_ssh_service.md +++ b/engine/examples/running_ssh_service.md @@ -6,16 +6,25 @@ title: Dockerize an SSH service ## Build an `eg_sshd` image +### Generate a secure root password for your image + +Using a static password for root access is dangerous. Create a random password before proceeding. + +### Build the image + The following `Dockerfile` sets up an SSHd service in a container that you can use to connect to and inspect other container's volumes, or to get -quick access to a test container. +quick access to a test container. Make the following substitutions: + +- With `RUN echo 'root:THEPASSWORDYOUCREATED' | chpasswd`, replace "THEPASSWORDYOUCREATED" with the password you've previously generated. +- With `RUN sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config`, use `without-password` instead of `prohibit-password` for Ubuntu 14.04. ```Dockerfile FROM ubuntu:16.04 RUN apt-get update && apt-get install -y openssh-server RUN mkdir /var/run/sshd -RUN echo 'root:screencast' | chpasswd +RUN echo 'root:THEPASSWORDYOUCREATED' | chpasswd RUN sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config # SSH login fix. 
Otherwise user is kicked off after login @@ -28,6 +37,7 @@ EXPOSE 22 CMD ["/usr/sbin/sshd", "-D"] ``` + Build the image using: ```bash diff --git a/engine/reference/commandline/builder.md b/engine/reference/commandline/builder.md new file mode 100644 index 0000000000..ddee46d4e3 --- /dev/null +++ b/engine/reference/commandline/builder.md @@ -0,0 +1,15 @@ +--- +datafolder: engine-cli +datafile: docker_builder +title: docker builder +--- + + + +{% include cli.md datafolder=page.datafolder datafile=page.datafile %} diff --git a/engine/reference/commandline/builder_prune.md b/engine/reference/commandline/builder_prune.md new file mode 100644 index 0000000000..649f9a5cfa --- /dev/null +++ b/engine/reference/commandline/builder_prune.md @@ -0,0 +1,15 @@ +--- +datafolder: engine-cli +datafile: docker_builder_prune +title: docker builder prune +--- + + + +{% include cli.md datafolder=page.datafolder datafile=page.datafile %} diff --git a/engine/release-notes.md b/engine/release-notes.md index cac26a4d50..a494943744 100644 --- a/engine/release-notes.md +++ b/engine/release-notes.md @@ -10,7 +10,7 @@ redirect_from: --- This document describes the latest changes, additions, known issues, and fixes -for Docker Engine Enterprise Edition (Docker EE) and Community Edition (CE) +for Docker Engine Enterprise Edition (Docker EE) and Community Edition (CE). Docker EE is a superset of all the features in Docker CE. It incorporates defect fixes that you can use in environments where new features cannot be adopted as quickly for @@ -29,6 +29,74 @@ consistency and compatibility reasons. > `sudo apt install docker-ce docker-ce-cli containerd.io`. See the install instructions > for the corresponding linux distro for details. 
+## 18.09.6
+
+2019-05-06
+
+### Builder
+* Fixed `COPY` and `ADD` with multiple `<src>` to not invalidate cache if `DOCKER_BUILDKIT=1`. [moby/moby#38964](https://github.com/moby/moby/issues/38964)
+
+### Networking
+* Cleaned up the cluster provider when the agent is closed. [docker/libnetwork#2354](https://github.com/docker/libnetwork/pull/2354)
+* Windows: Now selects a random host port if the user does not specify a host port. [docker/libnetwork#2369](https://github.com/docker/libnetwork/pull/2369)
+* `--service-cluster-ip-range` is now configurable for UCP install. [docker/orca#10263](https://github.com/docker/orca/issues/10263)
+
+### Known Issues
+* There are [important changes](/ee/upgrade) to the upgrade process that, if not correctly followed, can have an impact on the availability of applications running on the Swarm during upgrades. These constraints impact any upgrades coming from any version before 18.09 to version 18.09 or later.
+
+## 18.09.5
+
+2019-04-11
+
+### Builder
+
+* Fixed `DOCKER_BUILDKIT=1 docker build --squash ..` [docker/engine#176](https://github.com/docker/engine/pull/176)
+
+### Client
+
+* Fixed tty initial size error. [docker/cli#1775](https://github.com/docker/cli/pull/1775)
+* Fixed dial-stdio goroutine leakage. [docker/cli#1795](https://github.com/docker/cli/pull/1795)
+* Fixed the stack informer's selector used to track deployment. [docker/cli#1794](https://github.com/docker/cli/pull/1794)
+
+### Networking
+
+* Fixed `network=host` using wrong `resolv.conf` with `systemd-resolved`. [docker/engine#180](https://github.com/docker/engine/pull/180)
+* Fixed Windows ARP entries getting corrupted randomly under load. [docker/engine#192](https://github.com/docker/engine/pull/192)
+
+### Runtime
+* Now showing stopped containers with restart policy as `Restarting`. [docker/engine#181](https://github.com/docker/engine/pull/181)
+* Now using original process spec for execs. 
[docker/engine#178](https://github.com/docker/engine/pull/178) + +### Swarm Mode + +* Fixed leaking task resources when nodes are deleted. [docker/engine#185](https://github.com/docker/engine/pull/185) + +### Known Issues + +* There are [important changes](/ee/upgrade) to the upgrade process that, if not correctly followed, can have an impact on the availability of applications running on the Swarm during upgrades. These constraints impact any upgrades coming from any version before 18.09 to version 18.09 or later. + +## 18.09.4 + + 2019-03-28 + +### Builder + +* Added validation for `git ref` to avoid misinterpretation as a flag. [moby/moby#38944](https://github.com/moby/moby/pull/38944) + +### Runtime + +* Fixed `docker cp` error for filenames greater than 100 characters. [moby/moby#38634](https://github.com/moby/moby/pull/38634) +* Fixed `layer/layer_store` to ensure `NewInputTarStream` resources are released. [moby/moby#38413](https://github.com/moby/moby/pull/38413) +* Increased GRPC limit for `GetConfigs`. [moby/moby#38800](https://github.com/moby/moby/pull/38800) +* Updated `containerd` 1.2.5. [docker/engine#173](https://github.com/docker/engine/pull/173) + +### Swarm Mode +* Fixed nil pointer exception when joining node to swarm. [moby/moby#38618](https://github.com/moby/moby/issues/38618) +* Fixed issue for swarm nodes not being able to join as masters if http proxy is set. [moby/moby#36951] + +### Known Issues +* There are [important changes to the upgrade process](/ee/upgrade) that, if not correctly followed, can have impact on the availability of applications running on the Swarm during upgrades. These constraints impact any upgrades coming from any version before 18.09 to version 18.09 or later. + ## 18.09.3 2019-02-28 @@ -246,7 +314,7 @@ Update your configuration if this command prints a non-empty value for `MountFla ### Deprecation Notice -As of EE 2.2, Docker will deprecate support for Device Mapper as a storage driver. 
It will continue to be supported at this
+As of EE 2.1, Docker has deprecated support for Device Mapper as a storage driver. It will continue to be supported at this
time, but support will be removed in a future release. Docker will continue to support Device Mapper for existing EE 2.0
and 2.1 customers. Please contact Sales for more information.
@@ -263,6 +331,23 @@ Ubuntu 14.04 "Trusty Tahr" [docker-ce-packaging#255](https://github.com/docker/d
 
 ## Older Docker Engine EE Release notes
 
+## 18.03.1-ee-8
+
+ 2019-03-28
+
+### Builder
+
+* Added validation for `git ref` to avoid misinterpretation as a flag. [moby/moby#38944](https://github.com/moby/moby/pull/38944)
+
+### Runtime
+
+* Fixed `docker cp` error for filenames greater than 100 characters. [moby/moby#38634]
+* Fixed `layer/layer_store` to ensure `NewInputTarStream` resources are released. [moby/moby#38413]
+
+### Swarm Mode
+
+* Fixed issue for swarm nodes not being able to join as masters if http proxy is set. [moby/moby#36951]
+
 ## 18.03.1-ee-7
 
 2019-02-28
@@ -391,6 +476,19 @@ Ubuntu 14.04 "Trusty Tahr" [docker-ce-packaging#255](https://github.com/docker/d
 + Support for `--chown` with `COPY` and `ADD` in `Dockerfile`.
 + Added functionality for the `docker logs` command to include the output of multiple logging drivers.
 
+## 17.06.2-ee-21
+2019-04-11
+
+### Builder
+
+* Added validation for git ref so it can't be misinterpreted as a flag. [moby/moby#38944](https://github.com/moby/moby/pull/38944)
+
+### Runtime
+
+* Fixed `docker cp` error with filenames greater than 100 characters. [moby/moby#38634](https://github.com/moby/moby/pull/38634)
+* Removed temporary hot-fix and applied latest upstream patches for CVE-2019-5736. [docker/runc#9](https://github.com/docker/runc/pull/9)
+* Fixed rootfs: umount all procfs and sysfs with `--no-pivot`. 
[docker/runc#10](https://github.com/docker/runc/pull/10) + ## 17.06.2-ee-20 2019-02-28 diff --git a/engine/security/certificates.md b/engine/security/certificates.md index bea1564678..52bec4ec7d 100644 --- a/engine/security/certificates.md +++ b/engine/security/certificates.md @@ -17,7 +17,7 @@ properly authenticated using *certificate-based client-server authentication*. We show you how to install a Certificate Authority (CA) root certificate for the registry and how to set the client TLS certificate for verification. -## Understanding the configuration +## Understand the configuration A custom certificate is configured by creating a directory under `/etc/docker/certs.d` using the same name as the registry's hostname, such as @@ -55,7 +55,7 @@ purposes only. You should consult your operating system documentation for creating an os-provided bundled certificate chain. -## Creating the client certificates +## Create the client certificates Use OpenSSL's `genrsa` and `req` commands to first generate an RSA key and then use the key to create the certificate. @@ -89,7 +89,7 @@ If the Docker registry is accessed without a port number, do not add the port to └── ca.crt ``` -## Related Information +## Related information * [Use trusted images](index.md) * [Protect the Docker daemon socket](https.md) diff --git a/engine/security/https.md b/engine/security/https.md index 0e8bdd4d3d..38acca5f4d 100644 --- a/engine/security/https.md +++ b/engine/security/https.md @@ -21,12 +21,12 @@ it only connects to servers with a certificate signed by that CA. > Advanced topic > > Using TLS and managing a CA is an advanced topic. Please familiarize yourself -> with OpenSSL, x509 and TLS before using it in production. +> with OpenSSL, x509, and TLS before using it in production. 
{:.important} ## Create a CA, server and client keys with OpenSSL -> **Note**: replace all instances of `$HOST` in the following example with the +> **Note**: Replace all instances of `$HOST` in the following example with the > DNS name of your Docker daemon's host. First, on the **Docker daemon's host machine**, generate CA private and public keys: @@ -60,7 +60,7 @@ Now that you have a CA, you can create a server key and certificate signing request (CSR). Make sure that "Common Name" matches the hostname you use to connect to Docker: -> **Note**: replace all instances of `$HOST` in the following example with the +> **Note**: Replace all instances of `$HOST` in the following example with the > DNS name of your Docker daemon's host. $ openssl genrsa -out server-key.pem 4096 @@ -102,7 +102,7 @@ Docker clients. For client authentication, create a client key and certificate signing request: -> **Note**: for simplicity of the next couple of steps, you may perform this +> **Note**: For simplicity of the next couple of steps, you may perform this > step on the Docker daemon's host machine as well. $ openssl genrsa -out key.pem 4096 @@ -160,7 +160,7 @@ certificates and trusted CA: > need to copy your CA certificate, your server certificate, and your client > certificate to that machine. -> **Note**: replace all instances of `$HOST` in the following example with the +> **Note**: Replace all instances of `$HOST` in the following example with the > DNS name of your Docker daemon's host. $ docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem \ diff --git a/engine/security/seccomp.md b/engine/security/seccomp.md index 01e677c18f..c001f28c7a 100644 --- a/engine/security/seccomp.md +++ b/engine/security/seccomp.md @@ -29,7 +29,7 @@ The default `seccomp` profile provides a sane default for running containers wit seccomp and disables around 44 system calls out of 300+. It is moderately protective while providing wide application compatibility. 
The default Docker profile can be found -[here](https://github.com/moby/moby/blob/master/profiles/seccomp/default.json)). +[here](https://github.com/moby/moby/blob/master/profiles/seccomp/default.json). In effect, the profile is a whitelist which denies access to system calls by default, then whitelists specific system calls. The profile works by defining a diff --git a/engine/security/trust/content_trust.md b/engine/security/trust/content_trust.md index fd82810f1f..bedd81b38a 100644 --- a/engine/security/trust/content_trust.md +++ b/engine/security/trust/content_trust.md @@ -20,7 +20,7 @@ client-side or runtime verification of the integrity and publisher of specific image tags. Through DCT, image publishers can sign their images and image consumers can -ensure that the images they use are signed. Publishers could be be individuals +ensure that the images they use are signed. Publishers could be individuals or organizations manually signing their content or automated software supply chains signing content as part of their release process. @@ -148,7 +148,7 @@ Unique Name (GUN). If this is the first time you are adding a delegation to that repository, this command will also initiate the repository, using a local Notary canonical root key. To understand more about initiating a repository, and the role of delegations, head to -[delegations for content trust](trust_delegation/#managing-delegations-in-a-notary-server) +[delegations for content trust](trust_delegation/#managing-delegations-in-a-notary-server). ``` $ docker trust signer add --key cert.pem jeff dtr.example.com/admin/demo @@ -230,6 +230,7 @@ trusted sources, with repositories and tags signed with the commands [above](#si Engine Signature Verification prevents the following: * `$ docker container run` of an unsigned image. +* `$ docker pull` of an unsigned image. * `$ docker build` where the `FROM` image is not signed or is not scratch. 
DCT does not verify that a running container’s filesystem has not been altered @@ -242,13 +243,13 @@ unsigned images from being imported, loaded, or created. DCT is controlled by the Docker Engine's configuration file. By default this is found at `/etc/docker/daemon.json`. More details on this file can be found -[here](/engine/reference/commandline/dockerd/#daemon-configuration-file) +[here](/engine/reference/commandline/dockerd/#daemon-configuration-file). The `content-trust` flag is based around a `mode` variable instructing the engine whether to enforce signed images, and a `trust-pinning` variable instructing the engine which sources to trust. -`Mode` can take 3 variables: +`Mode` can take three variables: * `Disabled` - Verification is not active and the remainder of the content-trust related metadata will be ignored. This is the default value if `mode` is not @@ -268,7 +269,7 @@ verified successfully will not be pulled or run. } ``` -### Official Docker Images +### Official Docker images All official Docker library images found on the Docker Hub (docker.io/library/*) are signed by the same Notary root key. This root key's ID has been embedded @@ -286,9 +287,9 @@ Docker images can be used. Specify: } ``` -### User-Signed Images +### User-Signed images -There are 2 options for trust pinning user-signed images: +There are two options for trust pinning user-signed images: * Notary Canonical Root Key ID (DCT Root Key) is an ID that describes *just* the root key used to sign a repository (or rather its respective keys). This is the diff --git a/engine/security/trust/trust_automation.md b/engine/security/trust/trust_automation.md index ec40980d0e..7a76468fd1 100644 --- a/engine/security/trust/trust_automation.md +++ b/engine/security/trust/trust_automation.md @@ -14,7 +14,7 @@ that and understand its prerequisites. 
When working directly with the Notary client, it uses its [own set of environment variables](/notary/reference/client-config.md#environment-variables-optional). -## Adding a Delegation Private Key +## Add a delegation private key To automate importing a delegation private key to the local Docker trust store, we need to pass a passphrase for the new key. This passphrase will be required @@ -28,7 +28,7 @@ Loading key from "delegation.key"... Successfully imported key from delegation.key ``` -## Adding a Delegation Public Key +## Add a delegation public key If you initialising a repository at the same time as adding a Delegation public key, then you will need to use the local Notary Canonical Root Key's @@ -50,7 +50,7 @@ Successfully initialized "dtr.example.com/admin/demo" Successfully added signer: dtr.example.com/admin/demo ``` -## Signing an Image +## Sign an image Finally when signing an image, we will need to export the passphrase of the signing key. This was created when the key was loaded into the local Docker @@ -68,7 +68,7 @@ Signing and pushing trust metadata Successfully signed dtr.example.com/admin/demo:1 ``` -## Building with content trust +## Build with content trust You can also build with content trust. Before running the `docker build` command, you should set the environment variable `DOCKER_CONTENT_TRUST` either manually or diff --git a/engine/security/trust/trust_delegation.md b/engine/security/trust/trust_delegation.md index 2aa6e46973..12533b020d 100644 --- a/engine/security/trust/trust_delegation.md +++ b/engine/security/trust/trust_delegation.md @@ -2,36 +2,37 @@ description: Delegations for content trust keywords: trust, security, delegations, keys, repository title: Delegations for content trust +redirect_from: +- /ee/dtr/user/access-dtr/configure-your-notary-client/ --- Delegations in Docker Content Trust (DCT) allow you to control who can and cannot sign -an image tag. A delegation will have a pair of delegation keys, public and -private. 
A delegation could contain multiple pairs of keys, contributors, to -allow multiple users to be part of a delegation, and to support key rotation. +an image tag. A delegation will have a pair of private and public delegation keys. A delegation +could contain multiple pairs of keys and contributors in order to a) allow multiple users +to be part of a delegation, and b) to support key rotation. The most important delegation within Docker Content Trust is `targets/releases`. This is seen as the canonical source of a trusted image tag, and without a contributor's key being under this delegation, they will be unable to sign a tag. Fortunately when using the `$ docker trust` commands, we will automatically -initialise a repository, manage the repository keys, and when a collaborator -gets added with `docker trust signer add` we will add their key to the -`targets/releases` delegation automatically. +initialize a repository, manage the repository keys, and add a collaborator's key to the +`targets/releases` delegation via `docker trust signer add`. ## Configuring the Docker Client -By default the `$ docker trust` commands are expecting the Notary server URL -to be the same as the Docker Registry URL specified in the image tag. When -using the Docker Hub or Docker Trusted Registry this is the case as a internal -proxy redirects the request; however for self hosted environments or 3rd party -registries you will need to specify an alternative URL for the notary server. -This is done with: +By default, the `$ docker trust` commands expect the notary server URL to be the +same as the registry URL specified in the image tag (following a similar logic to +`$ docker push`). When using Docker Hub or DTR, the notary +server URL is the same as the registry URL. However, for self-hosted +environments or 3rd party registries, you will need to specify an alternative +URL for the notary server. 
This is done with: ``` export DOCKER_CONTENT_TRUST_SERVER=https://: ``` -If you do not export this variable in self-hosted environments you may see +If you do not export this variable in self-hosted environments, you may see errors such as: ``` @@ -45,15 +46,47 @@ WARN[0000] Error while downloading remote metadata, using cached timestamp - thi [...] ``` +If you have enabled authentication for your notary server, or are using DTR, you will need to log in +before you can push data to the notary server. + +``` +$ docker login dtr.example.com/user/repo +Username: admin +Password: + +Login Succeeded + +$ docker trust signer add --key cert.pem jeff dtr.example.com/user/repo +Adding signer "jeff" to dtr.example.com/user/repo... +Initializing signed repository for dtr.example.com/user/repo... +Successfully initialized "dtr.example.com/user/repo" +Successfully added signer: jeff to dtr.example.com/user/repo +``` + +If you do not log in, you will see: + +```bash +$ docker trust signer add --key cert.pem jeff dtr.example.com/user/repo +Adding signer "jeff" to dtr.example.com/user/repo... +Initializing signed repository for dtr.example.com/user/repo... +you are not authorized to perform this operation: server returned 401. + +Failed to add signer to: dtr.example.com/user/repo +``` + +If you are using DTR and would like to work with a remote UCP's signing policy, +you must [register your DTR instance with that remote UCP](/ee/dtr/user/manage-images/sign-images/trust-with-remote-ucp/#registering-dtr-with-a-remote-universal-control-plane). +See [Using Docker Content Trust with a Remote UCP Cluster](/ee/dtr/user/manage-images/sign-images/trust-with-remote-ucp/) for more details. + ## Configuring the Notary Client Some of the more advanced features of DCT require the Notary CLI. 
To install and configure the Notary CLI: 1) Download the [client](https://github.com/theupdateframework/notary/releases) -and ensure that it is available on your path +and ensure that it is available on your path. -2) Create a configuration file at ~/.notary/config.json with the following content: +2) Create a configuration file at `~/.notary/config.json` with the following content: ``` { @@ -65,10 +98,9 @@ and ensure that it is available on your path } ``` -This configuration file will tell Notary where the local Docker Trust data is -stored, as well as which Notary server to use by default. +The newly created configuration file contains information about the location of your local Docker trust data and the notary server URL. -For more detailed information about how to use Notary outside of the +For more detailed information about how to use notary outside of the Docker Content Trust use cases, refer to the Notary CLI documentation [here](https://github.com/theupdateframework/notary/blob/master/docs/command_reference.md) @@ -338,7 +370,7 @@ Successfully removed ben from dtr.example.com/admin/demo #### Troubleshooting -1) If you see an error that there are no useable keys in `targets/releases`, you +1) If you see an error that there are no usable keys in `targets/releases`, you will need to add additional delegations using `docker trust signer add` before resigning images. 
@@ -474,3 +506,4 @@ No signatures or cannot access dtr.example.com/admin/demo * [Manage keys for content trust](trust_key_mng.md) * [Automation with content trust](trust_automation.md) * [Play in a content trust sandbox](trust_sandbox.md) +* [Using Docker Content Trust with a Remote UCP Cluster](/ee/dtr/user/manage-images/sign-images/trust-with-remote-ucp.md) diff --git a/engine/security/trust/trust_key_mng.md b/engine/security/trust/trust_key_mng.md index 6e797223b8..c83b0763c2 100644 --- a/engine/security/trust/trust_key_mng.md +++ b/engine/security/trust/trust_key_mng.md @@ -34,7 +34,7 @@ locally client-side. [Use the Notary CLI to manage your snapshot key locally again](/notary/advanced_usage.md#rotate-keys) for repositories created with newer versions of Docker. -## Choosing a passphrase +## Choose a passphrase The passphrases you chose for both the root key and your repository key should be randomly generated and stored in a password manager. Having the repository key diff --git a/engine/security/trust/trust_sandbox.md b/engine/security/trust/trust_sandbox.md index 65551042c9..08927899c8 100644 --- a/engine/security/trust/trust_sandbox.md +++ b/engine/security/trust/trust_sandbox.md @@ -121,7 +121,7 @@ the `trustsandbox` container, the Notary server, and the Registry server. images are downloaded from Docker Hub. -## Playing in the sandbox +## Play in the sandbox Now that everything is setup, you can go into your `trustsandbox` container and start testing Docker content trust. From your host machine, obtain a shell @@ -283,7 +283,7 @@ feel free to play with it and see how it behaves. If you find any security issues with Docker, feel free to send us an email at . 
-## Cleaning up your sandbox +## Clean up your sandbox When you are done, and want to clean up all the services you've started and any anonymous volumes that have been created, just run the following command in the diff --git a/engine/swarm/configs.md b/engine/swarm/configs.md index 99e99eadd4..a739122e39 100644 --- a/engine/swarm/configs.md +++ b/engine/swarm/configs.md @@ -122,8 +122,8 @@ Docker configs. ### Defining and using configs in compose files -Both the `docker compose` and `docker stack` commands support defining configs -in a compose file. See +The `docker stack` command supports defining configs in a Compose file. +However, the `configs` key is not supported for `docker compose`. See [the Compose file reference](/compose/compose-file/#configs) for details. ### Simple example: Get started with configs diff --git a/engine/swarm/services.md b/engine/swarm/services.md index 3830d5063b..8e5e0f3f48 100644 --- a/engine/swarm/services.md +++ b/engine/swarm/services.md @@ -565,7 +565,7 @@ services, you specify the number of replica tasks for the swarm manager to schedule onto available nodes. For global services, the scheduler places one task on each available node that meets the service's [placement constraints](#placement-constraints) and -[resource requirements](#reserve-cpu-or-memory-for-a-service). +[resource requirements](#reserve-memory-or-cpus-for-a-service). You control the type of service using the `--mode` flag. If you don't specify a mode, the service defaults to `replicated`. For replicated services, you specify @@ -628,7 +628,7 @@ operator (`==` or `!=`). For replicated services, it is possible that all services run on the same node, or each node only runs one replica, or that some nodes don't run any replicas. For global services, the service runs on every node that meets the placement constraint and any [resource -requirements](#reserve-cpu-or-memory-for-a-service). +requirements](#reserve-memory-or-cpus-for-a-service). 
```bash $ docker service create \ diff --git a/engine/swarm/swarm-tutorial/rolling-update.md b/engine/swarm/swarm-tutorial/rolling-update.md index 2db2bf8d28..1e9170772a 100644 --- a/engine/swarm/swarm-tutorial/rolling-update.md +++ b/engine/swarm/swarm-tutorial/rolling-update.md @@ -7,15 +7,15 @@ notoc: true In a previous step of the tutorial, you [scaled](scale-service.md) the number of instances of a service. In this part of the tutorial, you deploy a service based -on the Redis 3.0.6 container image. Then you upgrade the service to use the +on the Redis 3.0.6 container tag. Then you upgrade the service to use the Redis 3.0.7 container image using rolling updates. 1. If you haven't already, open a terminal and ssh into the machine where you run your manager node. For example, the tutorial uses a machine named `manager1`. -2. Deploy Redis 3.0.6 to the swarm and configure the swarm with a 10 second - update delay: +2. Deploy your Redis tag to the swarm and configure the swarm with a 10 second + update delay. Note that the following example shows an older Redis tag: ```bash $ docker service create \ diff --git a/get-started/index.md b/get-started/index.md index e172e7d4cd..736a5a4226 100644 --- a/get-started/index.md +++ b/get-started/index.md @@ -135,7 +135,7 @@ is available in Docker version 17.12.0-ce, build c97c6d6 ``` -2. Run `docker info` or (`docker version` without `--`) to view even more details about your docker installation: +2. Run `docker info` (or `docker version` without `--`) to view even more details about your Docker installation: ```shell docker info diff --git a/get-started/part2.md b/get-started/part2.md index cf0407ebf3..89968c1e96 100644 --- a/get-started/part2.md +++ b/get-started/part2.md @@ -402,6 +402,7 @@ application by running this container in a **service**. 
[Continue to Part 3 >>](part3.md){: class="button outline-btn"} +Or, learn how to [launch your container on your own machine using Digital Ocean](https://docs.docker.com/machine/examples/ocean/){: target="_blank" class="_" }. ## Recap and cheat sheet (optional) @@ -417,7 +418,7 @@ ones if you'd like to explore a bit before moving on. ```shell docker build -t friendlyhello . # Create image using this directory's Dockerfile -docker run -p 4000:80 friendlyhello # Run "friendlyname" mapping port 4000 to 80 +docker run -p 4000:80 friendlyhello # Run "friendlyhello" mapping port 4000 to 80 docker run -d -p 4000:80 friendlyhello # Same thing, but in detached mode docker container ls # List all running containers docker container ls -a # List all containers, even those not running diff --git a/get-started/part3.md b/get-started/part3.md index 28ec86e5ca..180f066f26 100644 --- a/get-started/part3.md +++ b/get-started/part3.md @@ -96,8 +96,9 @@ This `docker-compose.yml` file tells Docker to do the following: - Pull [the image we uploaded in step 2](part2.md) from the registry. - Run 5 instances of that image as a service - called `web`, limiting each one to use, at most, 10% of the CPU (across all - cores), and 50MB of RAM. + called `web`, limiting each one to use, at most, 10% of a single core of + CPU time (this could also be e.g. "1.5" to mean 1 and half core for each), + and 50MB of RAM. - Immediately restart containers if one fails. diff --git a/get-started/part4.md b/get-started/part4.md index fbbea8ac25..fb03928f9d 100644 --- a/get-started/part4.md +++ b/get-started/part4.md @@ -126,6 +126,8 @@ so they can connect to each other. Now, create a couple of VMs using our node management tool, `docker-machine`: +> **Note**: you need to run the following as administrator or else you don't have the permission to create hyperv VMs! 
+

```shell
docker-machine create -d hyperv --hyperv-virtual-switch "myswitch" myvm1
docker-machine create -d hyperv --hyperv-virtual-switch "myswitch" myvm2
```
@@ -143,6 +145,8 @@ You now have two VMs created, named `myvm1` and `myvm2`.

Use this command to list the machines and get their IP addresses.

+> **Note**: you need to run the following as administrator or else you don't get any reasonable output (only "UNKNOWN").
+
```shell
docker-machine ls
```
@@ -435,6 +439,10 @@ look:
>
> - Port 7946 TCP/UDP for container network discovery.
> - Port 4789 UDP for the container ingress network.
+>
+> Double-check what you have in the ports section under your web
+> service and make sure the IP addresses you enter in your browser
+> or curl reflect that

## Iterating and scaling your app

diff --git a/install/linux/docker-ce/binaries.md b/install/linux/docker-ce/binaries.md
index 31ae8dea87..ee23514359 100644
--- a/install/linux/docker-ce/binaries.md
+++ b/install/linux/docker-ce/binaries.md
@@ -17,7 +17,13 @@ system's package management system to manage Docker installation and upgrades.

Be aware that 32-bit static binary archives do not include the Docker daemon.
Static binaries for the Docker daemon binary are only available for Linux (as
-`dockerd`). Static binaries for the Docker client are available for Linux and macOS (as `docker`).
+`dockerd`).
+Static binaries for the Docker client are available for Linux and macOS (as `docker`).
+
+This topic discusses binary installation for both Linux and macOS:
+
+- [Install daemon and client binaries on Linux](#install-daemon-and-client-binaries-on-linux )
+- [Install client binaries on macOS](#install-client-binaries-on-macos )

## Install daemon and client binaries on Linux

@@ -110,12 +116,6 @@ instructions for enabling and configuring AppArmor or SELinux.

This command downloads a test image and runs it in a container. When the
container runs, it prints an informational message and exits.
-### Next steps - -- Continue to [Post-installation steps for Linux](/install/linux/linux-postinstall.md) - -- Continue with the [User Guide](/engine/userguide/index.md). - ## Install client binaries on macOS The macOS binary includes the Docker client only. It does not include the @@ -162,4 +162,8 @@ version. ## Next steps -Continue with the [User Guide](/get-started/index.md). +- On Linux: + - Continue to [Post-installation steps for Linux](/install/linux/linux-postinstall.md) + - Continue with the [User Guide](/engine/userguide/index.md). +- On macOS: + - Continue with the [User Guide](/get-started/index.md). diff --git a/machine/examples/ocean.md b/machine/examples/ocean.md index 1fd0f0663c..f4bb2cde80 100644 --- a/machine/examples/ocean.md +++ b/machine/examples/ocean.md @@ -143,4 +143,5 @@ provider console, Machine loses track of the server status. Use the - [Understand Machine concepts](../concepts.md) - [Docker Machine driver reference](../drivers/index.md) - [Docker Machine subcommand reference](../reference/index.md) +- [Create containers for your Docker Machine](../../get-started/part2.md) - [Provision a Docker Swarm cluster with Docker Machine](/swarm/provision-with-machine.md) diff --git a/machine/get-started.md b/machine/get-started.md index 9177a9bf1c..45d0e147da 100644 --- a/machine/get-started.md +++ b/machine/get-started.md @@ -7,7 +7,7 @@ title: Get started with Docker Machine and a local VM Let's take a look at using `docker-machine` to create, use and manage a Docker host inside of a local virtual machine. 
-## Prerequisite Information
+## Prerequisite information

With the advent of [Docker Desktop for Mac](/docker-for-mac/index.md) and [Docker
Desktop for Windows](/docker-for-windows/index.md) as replacements for [Docker
diff --git a/reference/dtr/2.6/cli/backup.md b/reference/dtr/2.6/cli/backup.md
index 4be9f3fda9..b69f099a40 100644
--- a/reference/dtr/2.6/cli/backup.md
+++ b/reference/dtr/2.6/cli/backup.md
@@ -15,12 +15,42 @@ docker run -i --rm docker/dtr \
backup [command options] > backup.tar
```

-### Example Usage
+### Example Commands
+
+#### Basic
+
```bash
-docker run -i --rm docker/dtr \
+docker run -i --rm --log-driver none docker/dtr:2.6.5 \
backup --ucp-ca "$(cat ca.pem)" --existing-replica-id 5eb9459a7832 > backup.tar
```

+#### Advanced (with chained commands)
+
+The following command has been tested on Linux:
+
+{% raw %}
+```none
+DTR_VERSION=$(docker container inspect $(docker container ps -f \
+  name=dtr-registry -q) | grep -m1 -Po '(?<=DTR_VERSION=)\d.\d.\d'); \
+REPLICA_ID=$(docker inspect -f '{{.Name}}' $(docker ps -q -f name=dtr-rethink) | cut -f 3 -d '-'); \
+read -p 'ucp-url (The UCP URL including domain and port): ' UCP_URL; \
+read -p 'ucp-username (The UCP administrator username): ' UCP_ADMIN; \
+read -sp 'ucp password: ' UCP_PASSWORD; \
+docker run --log-driver none -i --rm \
+  --env UCP_PASSWORD=$UCP_PASSWORD \
+  docker/dtr:$DTR_VERSION backup \
+  --ucp-username $UCP_ADMIN \
+  --ucp-url $UCP_URL \
+  --ucp-ca "$(curl https://${UCP_URL}/ca)" \
+  --existing-replica-id $REPLICA_ID > \
+  dtr-metadata-${DTR_VERSION}-backup-$(date +%Y%m%d-%H_%M_%S).tar
+```
+{% endraw %}
+
+For a detailed explanation on the advanced example, see
+[Back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata).
+To learn more about the `--log-driver` option for `docker run`, see [docker run reference](/engine/reference/run/#logging-drivers---log-driver).
+ ## Description This command creates a `tar` file with the contents of the volumes used by diff --git a/reference/dtr/2.6/cli/install.md b/reference/dtr/2.6/cli/install.md index 325a09f5b4..4473bd74b7 100644 --- a/reference/dtr/2.6/cli/install.md +++ b/reference/dtr/2.6/cli/install.md @@ -38,7 +38,7 @@ $ docker run -it --rm docker/dtr:{{ site.dtr_version }}.0 install \ | `--debug` | $DEBUG | Enable debug mode for additional logs. | | `--dtr-ca` | $DTR_CA | Use a PEM-encoded TLS CA certificate for DTR. By default DTR generates a self-signed TLS certificate during deployment. You can use your own root CA public certificate with `--dtr-ca "$(cat ca.pem)"`. | | `--dtr-cert` | $DTR_CERT | Use a PEM-encoded TLS certificate for DTR. By default DTR generates a self-signed TLS certificate during deployment. You can use your own public key certificate with `--dtr-cert "$(cat cert.pem)"`. If the certificate has been signed by an intermediate certificate authority, append its public key certificate at the end of the file to establish a chain of trust. | -| `--dtr-external-url` | $DTR_EXTERNAL_URL | URL of the host or load balancer clients use to reach DTR. When you use this flag, users are redirected to UCP for logging in. Once authenticated they are redirected to the URL you specify in this flag. If you don't use this flag, DTR is deployed without single sign-on with UCP. Users and teams are shared but users log in separately into the two applications. You can enable and disable single sign-on within your DTR system settings. Format `https://host[:port]`, where port is the value you used with `--replica-https-port`. | +| `--dtr-external-url` | $DTR_EXTERNAL_URL | URL of the host or load balancer clients use to reach DTR. When you use this flag, users are redirected to UCP for logging in. Once authenticated they are redirected to the URL you specify in this flag. If you don't use this flag, DTR is deployed without single sign-on with UCP. 
Users and teams are shared but users log in separately into the two applications. You can enable and disable single sign-on within your DTR system settings. Format `https://host[:port]`, where port is the value you used with `--replica-https-port`. Since [HSTS (HTTP Strict-Transport-Security) header](https://en.wikipedia.org/wiki/HTTP_Strict_Transport_Security) is included in all API responses, make sure to specify the FQDN (Fully Qualified Domain Name) of your DTR, or your browser may refuse to load the web interface. | | `--dtr-key` | $DTR_KEY | Use a PEM-encoded TLS private key for DTR. By default DTR generates a self-signed TLS certificate during deployment. You can use your own TLS private key with `--dtr-key "$(cat key.pem)"`. | | `--dtr-storage-volume` | $DTR_STORAGE_VOLUME | Customize the volume to store Docker images. By default DTR creates a volume to store the Docker images in the local filesystem of the node where DTR is running, without high-availability. Use this flag to specify a full path or volume name for DTR to store images. For high-availability, make sure all DTR replicas can read and write data on this volume. If you're using NFS, use `--nfs-storage-url` instead. | | `--enable-pprof` | $DTR_PPROF | Enables pprof profiling of the server. Use `--enable-pprof=false` to disable it. Once DTR is deployed with this flag, you can access the `pprof` endpoint for the api server at `/debug/pprof`, and the registry endpoint at `/registry_debug_pprof/debug/pprof`. | diff --git a/reference/dtr/2.6/cli/reconfigure.md b/reference/dtr/2.6/cli/reconfigure.md index 796144240f..b603010f5b 100644 --- a/reference/dtr/2.6/cli/reconfigure.md +++ b/reference/dtr/2.6/cli/reconfigure.md @@ -4,7 +4,7 @@ description: Change DTR configurations keywords: dtr, cli, reconfigure --- -Change DTR configurations +Change DTR configurations. ## Usage @@ -16,7 +16,7 @@ docker run -it --rm docker/dtr \ ## Description -This command changes DTR configuration settings. 
+This command changes DTR configuration settings. If you are using NFS as a storage volume, see [Use NFS](/ee/dtr/admin/configure/external-storage/nfs/) for details on changes to the reconfiguration process. DTR is restarted for the new configurations to take effect. To have no down time, configure your DTR for high availability. @@ -29,7 +29,7 @@ time, configure your DTR for high availability. | `--debug` | $DEBUG | Enable debug mode for additional logs of this bootstrap container (the log level of downstream DTR containers can be set with `--log-level`). | | `--dtr-ca` | $DTR_CA | Use a PEM-encoded TLS CA certificate for DTR. By default DTR generates a self-signed TLS certificate during deployment. You can use your own root CA public certificate with `--dtr-ca "$(cat ca.pem)"`. | | `--dtr-cert` | $DTR_CERT | Use a PEM-encoded TLS certificate for DTR. By default DTR generates a self-signed TLS certificate during deployment. You can use your own public key certificate with `--dtr-cert "$(cat cert.pem)"`. If the certificate has been signed by an intermediate certificate authority, append its public key certificate at the end of the file to establish a chain of trust. | -| `--dtr-external-url` | $DTR_EXTERNAL_URL | URL of the host or load balancer clients use to reach DTR. When you use this flag, users are redirected to UCP for logging in. Once authenticated they are redirected to the url you specify in this flag. If you don't use this flag, DTR is deployed without single sign-on with UCP. Users and teams are shared but users login separately into the two applications. You can enable and disable single sign-on in the DTR settings. Format `https://host[:port]`, where port is the value you used with `--replica-https-port`. | +| `--dtr-external-url` | $DTR_EXTERNAL_URL | URL of the host or load balancer clients use to reach DTR. When you use this flag, users are redirected to UCP for logging in. Once authenticated they are redirected to the url you specify in this flag. 
If you don't use this flag, DTR is deployed without single sign-on with UCP. Users and teams are shared but users login separately into the two applications. You can enable and disable single sign-on in the DTR settings. Format `https://host[:port]`, where port is the value you used with `--replica-https-port`. Since [HSTS (HTTP Strict-Transport-Security) header](https://en.wikipedia.org/wiki/HTTP_Strict_Transport_Security) is included in all API responses, make sure to specify the FQDN (Fully Qualified Domain Name) of your DTR, or your browser may refuse to load the web interface. | | `--dtr-key` | $DTR_KEY | Use a PEM-encoded TLS private key for DTR. By default DTR generates a self-signed TLS certificate during deployment. You can use your own TLS private key with `--dtr-key "$(cat key.pem)"`. | | `--dtr-storage-volume` | $DTR_STORAGE_VOLUME | Customize the volume to store Docker images. By default DTR creates a volume to store the Docker images in the local filesystem of the node where DTR is running, without high-availability. Use this flag to specify a full path or volume name for DTR to store images. For high-availability, make sure all DTR replicas can read and write data on this volume. If you're using NFS, use `--nfs-storage-url` instead. | | `--enable-pprof` | $DTR_PPROF | Enables pprof profiling of the server. Use `--enable-pprof=false` to disable it. Once DTR is deployed with this flag, you can access the pprof endpoint for the api server at `/debug/pprof`, and the registry endpoint at `/registry_debug_pprof/debug/pprof`. | @@ -40,13 +40,14 @@ time, configure your DTR for high availability. | `--log-host` | $LOG_HOST | The syslog system to send logs to. The endpoint to send logs to. Use this flag if you set `--log-protocol` to `tcp` or `udp`. | | `--log-level` | $LOG_LEVEL | Log level for all container logs when logging to syslog. Default: INFO. The supported log levels are `debug`, `info`, `warn`, `error`, or `fatal`. 
| | `--log-protocol` | $LOG_PROTOCOL | The protocol for sending logs. Default is internal. By default, DTR internal components log information using the logger specified in the Docker daemon in the node where the DTR replica is deployed. Use this option to send DTR logs to an external syslog system. The supported values are `tcp`, `udp`, and `internal`. Internal is the default option, stopping DTR from sending logs to an external system. Use this flag with `--log-host`. | -| `--nfs-storage-url` | $NFS_STORAGE_URL | Use NFS to store Docker images following this format: `nfs:///`. By default, DTR creates a volume to store the Docker images in the local filesystem of the node where DTR is running, without high availability. To use this flag, you need to install an NFS client library like `nfs-common` in the node where you're deploying DTR. You can test this by running `showmount -e `. When you join new replicas, they will start using NFS so there is no need to specify this flag. To reconfigure DTR to stop using NFS, leave this option empty: `--nfs-storage-url ""`. See [USE NFS](/ee/dtr/admin/configure/external-storage/nfs/) for more details. | +| `--nfs-storage-url` | $NFS_STORAGE_URL | When running DTR 2.5 (with experimental online garbage collection) and 2.6.0-2.6.3, there is an issue with [reconfiguring and restoring DTR with `--nfs-storage-url`](/ee/dtr/release-notes#version-26) which leads to erased tags. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed. To work around the issue, manually create a storage volume on each DTR node and reconfigure DTR with `--dtr-storage-volume` and your newly-created volume instead. See [Reconfigure Using a Local NFS Volume](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#reconfigureusingalocalnfsvolume) for more details. To reconfigure DTR to stop using NFS, leave this option empty: `--nfs-storage-url ""`. 
See [USE NFS](/ee/dtr/admin/configure/external-storage/nfs/) for more details. [Upgrade to 2.6.4](/reference/dtr/2.6/cli/upgrade/) and follow [Best practice for data migration in 2.6.4](/ee/dtr/admin/configure/external-storage/storage-backend-migration/#best-practice-for-data-migration) when switching storage backends. | | `--async-nfs` | $ASYNC_NFS | Use async NFS volume options on the replica specified in the `--existing-replica-id` option. The NFS configuration must be set with `--nfs-storage-url` explicitly to use this option. Using `--async-nfs` will bring down any containers on the replica that use the NFS volume, delete the NFS volume, bring it back up with the appropriate configuration, and restart any containers that were brought down. | | `--nfs-options` | $NFS_OPTIONS | Pass in NFS volume options verbatim for the replica specified in the `--existing-replica-id` option. The NFS configuration must be set with `--nfs-storage-url` explicitly to use this option. Specifying `--nfs-options` will pass in character-for-character the options specified in the argument when creating or recreating the NFS volume. For instance, to use NFS v4 with async, pass in "rw,nfsvers=4,async" as the argument. | | `--no-proxy` | $DTR_NO_PROXY | List of domains the proxy should not be used for. When using `--http-proxy` you can use this flag to specify a list of domains that you don't want to route through the proxy. Format `acme.com[, acme.org]`. | | `--replica-http-port` | $REPLICA_HTTP_PORT | The public HTTP port for the DTR replica. Default is `80`. This allows you to customize the HTTP port where users can reach DTR. Once users access the HTTP port, they are redirected to use an HTTPS connection, using the port specified with --replica-https-port. This port can also be used for unencrypted health checks. | | `--replica-https-port` | $REPLICA_HTTPS_PORT | The public HTTPS port for the DTR replica. Default is `443`. 
This allows you to customize the HTTPS port where users can reach DTR. Each replica can use a different port. | | `--replica-rethinkdb-cache-mb` | $RETHINKDB_CACHE_MB | The maximum amount of space in MB for RethinkDB in-memory cache used by the given replica. Default is auto. Auto is `(available_memory - 1024) / 2`. This config allows changing the RethinkDB cache usage per replica. You need to run it once per replica to change each one. | +| `--storage-migrated` | $STORAGE_MIGRATED | A flag added in 2.6.4 which lets you indicate the migration status of your storage data. Specify this flag if you are migrating to a new storage backend and have already moved all contents from your old backend to your new one. If not specified, DTR will assume the new backend is empty during a backend storage switch, and consequently destroy your existing tags and related image metadata. | | `--ucp-ca` | $UCP_CA | Use a PEM-encoded TLS CA certificate for UCP. Download the UCP TLS CA certificate from `https:///ca`, and use `--ucp-ca "$(cat ca.pem)"`. | | `--ucp-insecure-tls` | $UCP_INSECURE_TLS | Disable TLS verification for UCP. The installation uses TLS but always trusts the TLS certificate used by UCP, which can lead to MITM (man-in-the-middle) attacks. For production deployments, use `--ucp-ca "$(cat ca.pem)"` instead. | | `--ucp-password` | $UCP_PASSWORD | The UCP administrator password. | diff --git a/reference/dtr/2.6/cli/restore.md b/reference/dtr/2.6/cli/restore.md index a832c00031..640a34f69c 100644 --- a/reference/dtr/2.6/cli/restore.md +++ b/reference/dtr/2.6/cli/restore.md @@ -17,7 +17,10 @@ docker run -i --rm docker/dtr \ This command performs a fresh installation of DTR, and reconfigures it -with configuration data from a `tar` file generated by `docker/dtr backup`. +with configuration data from a `tar` file generated by [`docker/dtr backup`](backup.md). +If you are restoring DTR after a failure, please make sure you have destroyed the old DTR fully. 
See
+[DTR disaster recovery](/ee/dtr/admin/disaster-recovery/) for Docker's recommended recovery strategies
+based on your setup.

There are three steps you can take to recover an unhealthy DTR cluster:

@@ -46,9 +49,9 @@ DTR replicas for high availability.
| `--dtr-cert` | $DTR_CERT | Use a PEM-encoded TLS certificate for DTR. By default DTR generates a self-signed TLS certificate during deployment. You can use your own TLS certificate with `--dtr-cert "$(cat ca.pem)"`. |
| `--dtr-external-url` | $DTR_EXTERNAL_URL | URL of the host or load balancer clients use to reach DTR. When you use this flag, users are redirected to UCP for logging in. Once authenticated they are redirected to the URL you specify in this flag. If you don't use this flag, DTR is deployed without single sign-on with UCP. Users and teams are shared but users log in separately into the two applications. You can enable and disable single sign-on within your DTR system settings. Format `https://host[:port]`, where port is the value you used with `--replica-https-port`. |
| `--dtr-key` | $DTR_KEY | Use a PEM-encoded TLS private key for DTR. By default DTR generates a self-signed TLS certificate during deployment. You can use your own TLS private key with `--dtr-key "$(cat ca.pem)"`. |
-| `--dtr-storage-volume` | $DTR_STORAGE_VOLUME | Mandatory flag to allow for DTR to fall back to your configured storage setting at the time of backup. If you have previously configured DTR to use a full path or volume name for storage, specify this flag to use the same setting on restore. See [docker/dtr install](install.md) and [docker/dtr reconfigure](reconfigure.md) for usage details. Required if neither `--dtr-use-default-storage` nor `--nfs-storage-url` is set. |
-| `--dtr-use-default-storage` | $DTR_DEFAULT_STORAGE | Mandatory flag to allow for DTR to fall back to either your local filesystem or cloud storage depending on what was configured at the time of backup.
If cloud storage was configured, then the default storage on restore is cloud storage. Otherwise, local storage is used. Required if neither `--dtr-storage-volume` nor `--nfs-storage-url` is set. | -| `--nfs-storage-url` | $NFS_STORAGE_URL | Mandatory flag to allow for DTR to fall back to your configured storage setting at the time of backup. If NFS was previously configured, you must explicitly specify this flag to recover your NFS settings on restore. See [docker/dtr install](install.md) and [docker/dtr reconfigure](reconfigure.md) for NFS configuration options. Required if neither `--dtr-storage-volume` nor `--dtr-use-default-storage` is set. | +| `--dtr-storage-volume` | $DTR_STORAGE_VOLUME | Mandatory flag to allow for DTR to fall back to your configured storage setting at the time of backup. If you have previously configured DTR to use a full path or volume name for storage, specify this flag to use the same setting on restore. See [docker/dtr install](install.md) and [docker/dtr reconfigure](reconfigure.md) for usage details. | +| `--dtr-use-default-storage` | $DTR_DEFAULT_STORAGE | Mandatory flag to allow for DTR to fall back to your configured storage backend at the time of backup. If cloud storage was configured, then the default storage on restore is cloud storage. Otherwise, local storage is used. With DTR 2.5 (with experimental online garbage collection) and 2.6.0-2.6.3, this flag must be specified in order to keep your DTR metadata. If you encounter an issue with lost tags, see [Restore to Cloud Storage](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretocloudstorage) for Docker's recommended recovery strategy. [Upgrade to 2.6.4](/reference/dtr/2.6/cli/upgrade/) and follow [Best practice for data migration in 2.6.4](/ee/dtr/admin/configure/external-storage/storage-backend-migration/#best-practice-for-data-migration) when switching storage backends. 
| +| `--nfs-storage-url` | $NFS_STORAGE_URL | Mandatory flag to allow for DTR to fall back to your configured storage setting at the time of backup. When running DTR 2.5 (with experimental online garbage collection) and 2.6.0-2.6.3, there is an issue with [reconfiguring and restoring DTR with `--nfs-storage-url`](/ee/dtr/release-notes#version-26) which leads to erased tags. Make sure to [back up your DTR metadata](/ee/dtr/admin/disaster-recovery/create-a-backup/#back-up-dtr-metadata) before you proceed. If NFS was previously configured, you have to manually create a storage volume on each DTR node and specify `--dtr-storage-volume` with the newly-created volume instead. See [Restore to a Local NFS Volume](https://success.docker.com/article/dtr-26-lost-tags-after-reconfiguring-storage#restoretoalocalnfsvolume) for more details. For additional NFS configuration options to support **NFS v4**, see [docker/dtr install](install.md) and [docker/dtr reconfigure](reconfigure.md). [Upgrade to 2.6.4](/reference/dtr/2.6/cli/upgrade/) and follow [Best practice for data migration in 2.6.4](/ee/dtr/admin/configure/external-storage/storage-backend-migration/#best-practice-for-data-migration) when switching storage backends. | | `--enable-pprof` | $DTR_PPROF | Enables pprof profiling of the server. Use `--enable-pprof=false` to disable it. Once DTR is deployed with this flag, you can access the `pprof` endpoint for the api server at `/debug/pprof`, and the registry endpoint at `/registry_debug_pprof/debug/pprof`. | | `--help-extended` | $DTR_EXTENDED_HELP | Display extended help text for a given command. | | `--http-proxy` | $DTR_HTTP_PROXY | The HTTP proxy used for outgoing requests. 
| diff --git a/reference/ucp/3.1/api/index.md b/reference/ucp/3.1/api/index.md deleted file mode 100644 index b90bd55b0f..0000000000 --- a/reference/ucp/3.1/api/index.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -description: Learn how to use the Universal Control Plane REST API -keywords: ucp, api, reference -title: Universal Control Plane 3.1 API ---- - -
    -
    - - - - - - - - - - - - - - - - - - - - -
    diff --git a/reference/ucp/3.1/api/main.js b/reference/ucp/3.1/api/main.js index b55869fad9..32f1f7a522 100644 --- a/reference/ucp/3.1/api/main.js +++ b/reference/ucp/3.1/api/main.js @@ -3,8 +3,7 @@ window.onload = function() { // Build a system const ui = SwaggerUIBundle({ spec: - {"swagger":"2.0","info":{"description":"The Universal Control Plane API is a REST API, available using HTTPS, that enables programmatic access to swarm resources that are managed by UCP. UCP exposes the full Docker Engine API, so you can extend your existing code with UCP features. The API is secured with role-based access control so that only authorized users can make changes and deploy applications to your Docker swarm.\n\nThe UCP API is accessible in the same IP addresses and domain names that you use to access the web UI. It's the same API that the UCP web UI uses, so everything you can do on the UCP web UI from your browser, you can also do programmatically.\n\nThe system manages swarm resources by using collections, which you access through the `/collection` endpoint. For example, `GET /defaultCollection/` retrieves the default collection for a user. [Learn more about resource collections](https://www.docker.com/ucp-3).\n\n- The `/roles` endpoint lets you enumerate and create custom permissions for accessing collections.\n\n- The `/accounts` endpoint enables managing users, teams, and organizations.\n\n- The `/configs` endpoint gives you access to the swarm's configuration.","title":"UCP API Documentation","version":"1.39"},"paths":{"/_ping":{"get":{"description":"Check the health of a UCP manager.\nUse the `_ping` endpoint to check the health of a single UCP manager node. 
The UCP manager validates that all of its internal components are working, and it returns either 200, if all components are healthy, or 500, if any aren't healthy.\n\nIf you’re accessing the `_ping` endpoint through a load balancer, you have no way of knowing which UCP manager node isn't healthy, because any manager node may be serving your request. Make sure you’re connecting directly to the URL of a manager node, and not a load balancer.","tags":["UCP"],"summary":"Check the health of a UCP manager.","operationId":"Ping","responses":{"200":{"description":"Success, manager healthy"},"500":{"description":"Failure, manager unhealthy"},"default":{"description":"Success, manager healthy"}}}},"/accounts/":{"get":{"description":"List user and organization accounts.\nLists information about user and organization accounts. Supports sorting and\nfiltering.\nRequires authentication and authorization as any user.","consumes":["application/json"],"produces":["application/json"],"tags":["Accounts"],"summary":"List user and organization accounts.","operationId":"ListAccounts","parameters":[{"type":"string","default":"all","description":"Filter accounts by type or attribute - either \"users\", \"orgs\", \"admins\", \"non-admins\", \"active-users\", \"inactive-users\", or \"all\" (default). These filters cannot be combined in any way.","name":"filter","in":"query"},{"type":"string","default":"","description":"Specifies the ordering of the results - either \"name\" (default) or \"fullName\". 
Prefix with \"+\" (default) or \"-\" to specify ascending or descending order, respectively.","name":"order","in":"query"},{"type":"string","default":"","description":"Only return accounts with an order marker starting from this value.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of accounts per page of results.","name":"limit","in":"query"},{"type":"string","default":"","description":"Additionally filter results to those which have either a name or full name which contains this case insensitive string","name":"contains","in":"query"}],"responses":{"200":{"description":"Success, page of accounts listed.","schema":{"$ref":"#/definitions/responses.Accounts"}},"default":{"description":"Success, page of accounts listed.","schema":{"$ref":"#/definitions/responses.Accounts"}}}},"post":{"description":"Create a user or organization account.\nTo search for and import a user from an LDAP directory, the system must be\nconfigured with LDAP integration.\nRequires authentication and authorization as an admin user.","consumes":["application/json"],"produces":["application/json"],"tags":["Accounts"],"summary":"Create a user or organization account.","operationId":"CreateAccount","parameters":[{"type":"forms.CreateAccount","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.CreateAccount"}}],"responses":{"201":{"description":"Success, account created.","schema":{"$ref":"#/definitions/responses.Account"}}}},"patch":{"description":"Update information about user accounts or organizations, in bulk.\nRequires authentication and authorization as an admin user.","consumes":["application/json"],"produces":["application/json"],"tags":["Accounts"],"summary":"Update information about user accounts or organizations, in 
bulk.","operationId":"BulkAccountOps","parameters":[{"type":"forms.BulkOperations","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.BulkOperations"}}],"responses":{"200":{"description":"Success, bulk operations performed. Any errors encountered for an operation are returned.","schema":{"$ref":"#/definitions/responses.BulkResults"}},"default":{"description":"Success, bulk operations performed. Any errors encountered for an operation are returned.","schema":{"$ref":"#/definitions/responses.BulkResults"}}}}},"/accounts/{accountNameOrID}":{"get":{"description":"Details for a user or organization account.\nRequires authentication and authorization as any user.","consumes":["application/json"],"produces":["application/json"],"tags":["Accounts"],"summary":"Details for a user or organization account.","operationId":"GetAccount","parameters":[{"type":"string","default":"","description":"Name or id of account to fetch","name":"accountNameOrID","in":"path","required":true}],"responses":{"200":{"description":"Success, account returned.","schema":{"$ref":"#/definitions/responses.Account"}},"default":{"description":"Success, account returned.","schema":{"$ref":"#/definitions/responses.Account"}}}},"delete":{"description":"Delete a user or organization account.\nIf the system is configured to import users from an LDAP directory, the user\nmay be created again if they still match the current LDAP search config.\nRequires authentication and authorization as an admin user.","consumes":["application/json"],"produces":["application/json"],"tags":["Accounts"],"summary":"Delete a user or organization account.","operationId":"DeleteAccount","parameters":[{"type":"string","default":"","description":"Name or id of account to delete","name":"accountNameOrID","in":"path","required":true}],"responses":{"204":{"description":"Success, account deleted."}}},"patch":{"description":"Update details for a user or organization account.\nRequires authentication and 
authorization as an admin user, the target user (if\na user), or an admin member of the target organization (if an organization).","consumes":["application/json"],"produces":["application/json"],"tags":["Accounts"],"summary":"Update details for a user or organization account.","operationId":"UpdateAccount","parameters":[{"type":"string","default":"","description":"Name or id of account to update","name":"accountNameOrID","in":"path","required":true},{"type":"forms.UpdateAccount","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.UpdateAccount"}}],"responses":{"200":{"description":"Success, account updated.","schema":{"$ref":"#/definitions/responses.Account"}},"default":{"description":"Success, account updated.","schema":{"$ref":"#/definitions/responses.Account"}}}}},"/accounts/{accountNameOrID}/publicKeys":{"get":{"description":"List accountPublicKeys in an account.\nLists accountPublicKeys in ascending order by key ID.\nRequires authentication and authorization as any user.","consumes":["application/json"],"produces":["application/json"],"tags":["Account Public Keys","Accounts"],"summary":"List accountPublicKeys in an account.","operationId":"ListAccountPublicKeys","parameters":[{"type":"string","default":"","description":"Name or id of the account whose accountPublicKeys will be listed","name":"accountNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Only return accountPublicKeys with a key ID greater than or equal to this name.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of accountPublicKeys per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success, page of accountPublicKeys listed.","schema":{"$ref":"#/definitions/responses.AccountPublicKeys"}},"default":{"description":"Success, page of accountPublicKeys listed.","schema":{"$ref":"#/definitions/responses.AccountPublicKeys"}}}},"post":{"description":"Create a public key 
for an account.\nRequires authentication and authorization as an admin user, the target user (if\na user), or an admin member of the target organization (if an organization).","consumes":["application/json"],"produces":["application/json"],"tags":["Account Public Keys","Accounts"],"summary":"Create a public key for an account.","operationId":"CreateAccountPublicKey","parameters":[{"type":"string","default":"","description":"Name or id of account to fetch","name":"accountNameOrID","in":"path","required":true},{"type":"forms.CreateAccountPublicKey","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.CreateAccountPublicKey"}}],"responses":{"201":{"description":"Success, account public key created.","schema":{"$ref":"#/definitions/responses.AccountPublicKey"}}}}},"/accounts/{accountNameOrID}/publicKeys/{keyID}":{"delete":{"description":"Remove an account public key.\nRequires authentication and authorization as an admin user, the target user (if\na user), or an admin member of the target organization (if an organization).","consumes":["application/json"],"produces":["application/json"],"tags":["Account Public Keys","Accounts"],"summary":"Remove an account public key.","operationId":"DeleteAccountPublicKey","parameters":[{"type":"string","default":"","description":"Name or id of the account","name":"accountNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Public key id of the account","name":"keyID","in":"path","required":true}],"responses":{"204":{"description":"Success, account public key removed."}}},"patch":{"description":"Update details for an account public key.\nRequires authentication and authorization as an admin user, the target user (if\na user), or an admin member of the target organization (if an organization).","consumes":["application/json"],"produces":["application/json"],"tags":["Account Public Keys","Accounts"],"summary":"Update details for an account public 
key.","operationId":"UpdateAccountPublicKey","parameters":[{"type":"string","default":"","description":"Name or id of the account","name":"accountNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Public key id of the account","name":"keyID","in":"path","required":true},{"type":"forms.UpdateAccountPublicKey","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.UpdateAccountPublicKey"}}],"responses":{"200":{"description":"Success, account public key updated.","schema":{"$ref":"#/definitions/responses.AccountPublicKey"}},"default":{"description":"Success, account public key updated.","schema":{"$ref":"#/definitions/responses.AccountPublicKey"}}}}},"/accounts/{orgNameOrID}/adminMemberSyncConfig":{"get":{"description":"Get options for syncing admin members of an organization.\nRequires authentication and authorization as an admin user or an admin member\nof the organization.","consumes":["application/json"],"produces":["application/json"],"tags":["Organization Membership","Organizations","Accounts"],"summary":"Get options for syncing admin members of an organization.","operationId":"GetOrganizationAdminSyncConfig","parameters":[{"type":"string","default":"","description":"Name or id of organization whose LDAP sync options to be retrieved","name":"orgNameOrID","in":"path","required":true}],"responses":{"200":{"description":"Success, LDAP sync options retrieved.","schema":{"$ref":"#/definitions/responses.MemberSyncOpts"}},"default":{"description":"Success, LDAP sync options retrieved.","schema":{"$ref":"#/definitions/responses.MemberSyncOpts"}}}},"put":{"description":"Set options for syncing admin members of an organization.\nEnabling sync of organization admin members will disable the ability to\ndirectly manage organization membership for any users imported from an LDAP\ndirectory. 
Their organization membership is instead set by being synced as an\nadmin member of the organization or by being a member of any team within the\norganization.\nRequires authentication and authorization as an admin user or an admin member\nof the organization.","consumes":["application/json"],"produces":["application/json"],"tags":["Organization Membership","Organizations","Accounts"],"summary":"Set options for syncing admin members of an organization.","operationId":"SetOrganizationAdminSyncConfig","parameters":[{"type":"string","default":"","description":"Name or id of organization whose LDAP sync options to set","name":"orgNameOrID","in":"path","required":true},{"type":"forms.MemberSyncOpts","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.MemberSyncOpts"}}],"responses":{"200":{"description":"Success, LDAP sync options set.","schema":{"$ref":"#/definitions/responses.MemberSyncOpts"}},"default":{"description":"Success, LDAP sync options set.","schema":{"$ref":"#/definitions/responses.MemberSyncOpts"}}}}},"/accounts/{orgNameOrID}/members":{"get":{"description":"List members of an organization.\nLists memberships in ascending order by user ID.\nRequires authentication and authorization as an admin user or a member of the\norganization.","consumes":["application/json"],"produces":["application/json"],"tags":["Organization Membership","Organizations","Accounts"],"summary":"List members of an organization.","operationId":"ListOrganizationMembers","parameters":[{"type":"string","default":"","description":"Name or id of organization whose members will be listed","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"all","description":"Filter members by type - either 'admins', 'non-admins', or 'all' (default).","name":"filter","in":"query"},{"type":"string","default":"","description":"Only return members with a user ID greater than or equal to this 
ID.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of members per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success, page of organization members listed.","schema":{"$ref":"#/definitions/responses.Members"}},"default":{"description":"Success, page of organization members listed.","schema":{"$ref":"#/definitions/responses.Members"}}}}},"/accounts/{orgNameOrID}/members/{memberNameOrID}":{"get":{"description":"Details of a user's membership in an organization.\nRequires authentication and authorization as an admin user, a member of the\norganization, or the target user.","consumes":["application/json"],"produces":["application/json"],"tags":["Organization Membership","Organizations","Accounts"],"summary":"Details of a user's membership in an organization.","operationId":"GetOrganizationMembership","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the membership will be retrieved","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of user whose membership will be retrieved","name":"memberNameOrID","in":"path","required":true}],"responses":{"200":{"description":"Success, membership returned.","schema":{"$ref":"#/definitions/responses.Member"}},"default":{"description":"Success, membership returned.","schema":{"$ref":"#/definitions/responses.Member"}}}},"put":{"description":"Add a user to an organization.\nIf organization admin members are configured to be synced with LDAP, users\nwhich are imported from LDAP cannot be manually added as members of the\norganization and must be either synced as an organization admin member or be\nadded as a member of team within the organization.\nRequires authentication and authorization as an admin user or an admin member\nof the organization","consumes":["application/json"],"produces":["application/json"],"tags":["Organization 
Membership","Organizations","Accounts"],"summary":"Add a user to an organization.","operationId":"AddOrganizationMember","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the membership will be added","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of user which will be added as a member","name":"memberNameOrID","in":"path","required":true},{"type":"forms.SetMembership","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.SetMembership"}}],"responses":{"200":{"description":"Success, membership set.","schema":{"$ref":"#/definitions/responses.Member"}},"default":{"description":"Success, membership set.","schema":{"$ref":"#/definitions/responses.Member"}}}},"delete":{"description":"Remove a user from an organization.\nRemoving a member of the organization will also remove them from any teams in\nthe organization. If organization admin members are configured to be synced\nwith LDAP, users which are imported from LDAP cannot be manually removed as\nmembers of the organization and must be either synced as an organization admin\nmember or removed as a member of all teams within the organization.\nRequires authentication and authorization as an admin user or an admin member\nof the organization.","consumes":["application/json"],"produces":["application/json"],"tags":["Organization Membership","Organizations","Accounts"],"summary":"Remove a user from an organization.","operationId":"DeleteOrganizationMember","parameters":[{"type":"string","default":"","description":"Name or id of user whose membership will be deleted","name":"memberNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of organization in which the membership will be deleted","name":"orgNameOrID","in":"path","required":true}],"responses":{"204":{"description":"Success, membership 
removed."}}}},"/accounts/{orgNameOrID}/members/{memberNameOrID}/teams":{"get":{"description":"List a user's team membership in an organization.\nLists team memberships in ascending order by team ID.\nRequires authentication and authorization as an admin user or a member of the\norganization.","consumes":["application/json"],"produces":["application/json"],"tags":["Organization Membership","Organizations","Accounts"],"summary":"List a user's team membership in an organization.","operationId":"ListOrganizationMemberTeams","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the member's team memberships will be listed","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of user whose memberships will be listed","name":"memberNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Only return team memberships with a team ID greater than or equal to this ID.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of team memberships per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success, page of member's teams listed.","schema":{"$ref":"#/definitions/responses.MemberTeams"}},"default":{"description":"Success, page of member's teams listed.","schema":{"$ref":"#/definitions/responses.MemberTeams"}}}}},"/accounts/{orgNameOrID}/teams":{"get":{"description":"List teams in an organization.\nLists teams in ascending order by name.\nRequires authentication and authorization as an admin user or a member of the\norganization.","consumes":["application/json"],"produces":["application/json"],"tags":["Teams","Organizations","Accounts"],"summary":"List teams in an organization.","operationId":"ListTeams","parameters":[{"type":"string","default":"","description":"Name or id of organization whose teams will be 
listed","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Only return teams with a name greater than or equal to this name.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of teams per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success, page of teams listed.","schema":{"$ref":"#/definitions/responses.Teams"}},"default":{"description":"Success, page of teams listed.","schema":{"$ref":"#/definitions/responses.Teams"}}}},"post":{"description":"Create a team.\nRequires authentication and authorization as an admin user or an admin member\nof the organization.","consumes":["application/json"],"produces":["application/json"],"tags":["Teams","Organizations","Accounts"],"summary":"Create a team.","operationId":"CreateTeam","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the team will be created","name":"orgNameOrID","in":"path","required":true},{"type":"forms.CreateTeam","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.CreateTeam"}}],"responses":{"201":{"description":"Success, team created.","schema":{"$ref":"#/definitions/responses.Team"}}}}},"/accounts/{orgNameOrID}/teams/{teamNameOrID}":{"get":{"description":"Details for a team.\nRequires authentication and authorization as an admin user or a member of the\norganization.","consumes":["application/json"],"produces":["application/json"],"tags":["Teams","Organizations","Accounts"],"summary":"Details for a team.","operationId":"GetTeam","parameters":[{"type":"string","default":"","description":"Name or id of team which will be retrieved","name":"teamNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of organization in which the team will be retrieved","name":"orgNameOrID","in":"path","required":true}],"responses":{"200":{"description":"Success, team 
returned.","schema":{"$ref":"#/definitions/responses.Team"}},"default":{"description":"Success, team returned.","schema":{"$ref":"#/definitions/responses.Team"}}}},"delete":{"description":"Delete a team.\nRequires authentication and authorization as an admin user or an admin member\nof the organization.","consumes":["application/json"],"produces":["application/json"],"tags":["Teams","Organizations","Accounts"],"summary":"Delete a team.","operationId":"DeleteTeam","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the team will be deleted","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of team which will be deleted","name":"teamNameOrID","in":"path","required":true}],"responses":{"204":{"description":"Success, team deleted."}}},"patch":{"description":"Update details for a team.\nRequires authentication and authorization as an admin user, an admin member of\nthe organization, or an admin member of the team.","consumes":["application/json"],"produces":["application/json"],"tags":["Teams","Organizations","Accounts"],"summary":"Update details for a team.","operationId":"UpdateTeam","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the team will be updated","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of team which will be updated","name":"teamNameOrID","in":"path","required":true},{"type":"forms.UpdateTeam","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.UpdateTeam"}}],"responses":{"200":{"description":"Success, team updated.","schema":{"$ref":"#/definitions/responses.Team"}},"default":{"description":"Success, team updated.","schema":{"$ref":"#/definitions/responses.Team"}}}}},"/accounts/{orgNameOrID}/teams/{teamNameOrID}/groupLinkConfig":{"get":{"description":"Get options for linking group of a team.\nRequires authentication and 
authorization as an admin user, an admin group of\nthe organization, or an admin group of the team.","consumes":["application/json"],"produces":["application/json"],"tags":["Team Membership","Teams","Organizations","Accounts"],"summary":"Get options for linking group of a team.","operationId":"GetTeamGroupLinkConfig","parameters":[{"type":"string","default":"","description":"Name or id of organization to which the team belongs","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of team whose SAML link config will be retrieved","name":"teamNameOrID","in":"path","required":true}],"responses":{"200":{"description":"Success, SAML link options retrieved.","schema":{"$ref":"#/definitions/responses.GroupLinkOpts"}},"default":{"description":"Success, SAML link options retrieved.","schema":{"$ref":"#/definitions/responses.GroupLinkOpts"}}}},"put":{"description":"Set options for linking this team with a group attribute from SAML assertions.\nEnabling link of team members will disable the ability to manually manage team\nmembership for any users imported from SAML. 
Their team membership is instead\nmanaged by the group attribute of the SAML assertion.\nRequires authentication and authorization as an admin user, an admin member of\nthe organization, or an admin member of the team.","consumes":["application/json"],"produces":["application/json"],"tags":["Team Membership","Teams","Organizations","Accounts"],"summary":"Set options for linking this team with a group attribute from SAML assertions.","operationId":"SetTeamGroupLinkConfig","parameters":[{"type":"string","default":"","description":"Name or id of team whose SAML link config will be set","name":"teamNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of organization to which the team belongs","name":"orgNameOrID","in":"path","required":true},{"type":"forms.GroupLinkOpts","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.GroupLinkOpts"}}],"responses":{"200":{"description":"Success, SAML link options set.","schema":{"$ref":"#/definitions/responses.GroupLinkOpts"}},"default":{"description":"Success, SAML link options set.","schema":{"$ref":"#/definitions/responses.GroupLinkOpts"}}}}},"/accounts/{orgNameOrID}/teams/{teamNameOrID}/memberSyncConfig":{"get":{"description":"Get options for syncing members of a team.\nRequires authentication and authorization as an admin user, an admin member of\nthe organization, or an admin member of the team.","consumes":["application/json"],"produces":["application/json"],"tags":["Team Membership","Teams","Organizations","Accounts"],"summary":"Get options for syncing members of a team.","operationId":"GetTeamMemberSyncConfig","parameters":[{"type":"string","default":"","description":"Name or id of organization to which the team belongs","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of team whose LDAP sync config will be 
retrieved","name":"teamNameOrID","in":"path","required":true}],"responses":{"200":{"description":"Success, LDAP sync options retrieved.","schema":{"$ref":"#/definitions/responses.MemberSyncOpts"}},"default":{"description":"Success, LDAP sync options retrieved.","schema":{"$ref":"#/definitions/responses.MemberSyncOpts"}}}},"put":{"description":"Set options for syncing members of a team.\nEnabling sync of team members will disable the ability to manually manage team\nmembership for any users imported from LDAP. Their team membership is instead\nmanaged by the LDAP sync.\nRequires authentication and authorization as an admin user, an admin member of\nthe organization, or an admin member of the team.","consumes":["application/json"],"produces":["application/json"],"tags":["Team Membership","Teams","Organizations","Accounts"],"summary":"Set options for syncing members of a team.","operationId":"SetTeamMemberSyncConfig","parameters":[{"type":"string","default":"","description":"Name or id of organization to which the team belongs","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of team whose LDAP sync config will be set","name":"teamNameOrID","in":"path","required":true},{"type":"forms.MemberSyncOpts","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.MemberSyncOpts"}}],"responses":{"200":{"description":"Success, LDAP sync options set.","schema":{"$ref":"#/definitions/responses.MemberSyncOpts"}},"default":{"description":"Success, LDAP sync options set.","schema":{"$ref":"#/definitions/responses.MemberSyncOpts"}}}}},"/accounts/{orgNameOrID}/teams/{teamNameOrID}/members":{"get":{"description":"List members of a team.\nLists memberships in ascending order by user ID.\nRequires authentication and authorization as an admin user or a member of the\norganization.","consumes":["application/json"],"produces":["application/json"],"tags":["Team 
Membership","Teams","Organizations","Accounts"],"summary":"List members of a team.","operationId":"ListTeamMembers","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the team's members will be listed'","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of team whose members will be listed","name":"teamNameOrID","in":"path","required":true},{"type":"string","default":"all","description":"Filter members by type - either 'admins', 'non-admins', or 'all' (default).","name":"filter","in":"query"},{"type":"string","default":"","description":"Only return members with a user ID greater than or equal to this ID.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of members per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success, page of team members listed.","schema":{"$ref":"#/definitions/responses.Members"}},"default":{"description":"Success, page of team members listed.","schema":{"$ref":"#/definitions/responses.Members"}}}}},"/accounts/{orgNameOrID}/teams/{teamNameOrID}/members/{memberNameOrID}":{"get":{"description":"Details of a user's membership in a team.\nRequires authentication and authorization as an admin user or a member of the\norganization.","consumes":["application/json"],"produces":["application/json"],"tags":["Team Membership","Teams","Organizations","Accounts"],"summary":"Details of a user's membership in a team.","operationId":"GetTeamMembership","parameters":[{"type":"string","default":"","description":"Name or id of user whose team membership will be retrieved","name":"memberNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of organization in which the team membership will be retrieved","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of the team in which the membership will be 
retrieved","name":"teamNameOrID","in":"path","required":true}],"responses":{"200":{"description":"Success, team membership returned.","schema":{"$ref":"#/definitions/responses.Member"}},"default":{"description":"Success, team membership returned.","schema":{"$ref":"#/definitions/responses.Member"}}}},"put":{"description":"Add a user to a team.\nThe user will be added as a member of the organization if they are not already.\nIf team members are configured to be synced with LDAP, users which are imported\nfrom LDAP cannot be manually added as members of the team and must be synced\nwith LDAP.\nRequires authentication and authorization as an admin user, an admin member of\nthe organization, or an admin member of the team.","consumes":["application/json"],"produces":["application/json"],"tags":["Team Membership","Teams","Organizations","Accounts"],"summary":"Add a user to a team.","operationId":"AddTeamMember","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the team membership will be added","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of the team in which the membership will be added","name":"teamNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of user which will be added as a member","name":"memberNameOrID","in":"path","required":true},{"type":"forms.SetMembership","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.SetMembership"}}],"responses":{"200":{"description":"Success, team membership set.","schema":{"$ref":"#/definitions/responses.Member"}},"default":{"description":"Success, team membership set.","schema":{"$ref":"#/definitions/responses.Member"}}}},"delete":{"description":"Remove a member from a team.\nThe user will remain a member of the organization. 
If team members are\nconfigured to be synced with LDAP, users which are imported from LDAP cannot be\nmanually removed as members of the team and must be synced with LDAP.\nRequires authentication and authorization as an admin user, an admin member of\nthe organization, or an admin member of the team.","consumes":["application/json"],"produces":["application/json"],"tags":["Team Membership","Teams","Organizations","Accounts"],"summary":"Remove a member from a team.","operationId":"DeleteTeamMember","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the team membership will be deleted","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of the team in which the membership will be deleted","name":"teamNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of user whose team membership will be deleted","name":"memberNameOrID","in":"path","required":true}],"responses":{"204":{"description":"Success, team membership deleted."}}}},"/accounts/{userNameOrID}/changePassword":{"post":{"description":"Change a user's password.\nRequires authentication and authorization as an admin user or the target user.","consumes":["application/json"],"produces":["application/json"],"tags":["User Accounts","Accounts"],"summary":"Change a user's password.","operationId":"ChangePassword","parameters":[{"type":"string","default":"","description":"Username or id of user whose password is to be changed","name":"userNameOrID","in":"path","required":true},{"type":"forms.ChangePassword","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.ChangePassword"}}],"responses":{"200":{"description":"Success, password changed.","schema":{"$ref":"#/definitions/responses.Account"}},"default":{"description":"Success, password 
changed.","schema":{"$ref":"#/definitions/responses.Account"}}}}},"/accounts/{userNameOrID}/organizations":{"get":{"description":"List a user's organization memberships.\nLists organization memberships in ascending order by organization ID.\nRequires authentication and authorization as an admin user or the target user.","consumes":["application/json"],"produces":["application/json"],"tags":["User Accounts","Accounts"],"summary":"List a user's organization memberships.","operationId":"ListUserOrganizations","parameters":[{"type":"string","default":"","description":"Name or id of user to whose organizations will be listed","name":"userNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Only return memberships with an org ID greater than or equal to this ID.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of organizations per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success, page of user's organizations listed.","schema":{"$ref":"#/definitions/responses.MemberOrgs"}},"default":{"description":"Success, page of user's organizations listed.","schema":{"$ref":"#/definitions/responses.MemberOrgs"}}}}},"/api/composehelper":{"get":{"tags":["UCP"],"summary":"/api/composehelper","operationId":"restfulNoop","responses":{}}},"/api/ucp/app/render":{"post":{"tags":["UCP"],"summary":"/api/ucp/app/render","operationId":"restfulNoop","responses":{}}},"/api/ucp/config-toml":{"get":{"description":"Export the current UCP Configuration as a TOML file.","produces":["application/toml"],"tags":["UCP"],"summary":"Export the current UCP Configuration as a TOML file.","operationId":"Get Config TOML","responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/config.UCPConfiguration"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/config.UCPConfiguration"}}}},"put":{"description":"Import UCP Configuration from a TOML 
file.","consumes":["application/toml"],"produces":["application/json"],"tags":["UCP"],"summary":"Import UCP Configuration from a TOML file.","operationId":"PUT Config TOML","responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/api.putConfigOrLicenseResponse"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/api.putConfigOrLicenseResponse"}}}}},"/auth":{"post":{"description":"Validate credentials for a registry and, if available, get an identity token for accessing the registry without password.","consumes":["application/json"],"produces":["application/json"],"tags":["System"],"summary":"Check auth configuration","operationId":"SystemAuth","parameters":[{"description":"Authentication to check","name":"authConfig","in":"body","schema":{"$ref":"#/definitions/AuthConfig"}}],"responses":{"200":{"description":"An identity token was generated successfully.","schema":{"type":"object","title":"SystemAuthResponse","required":["Status"],"properties":{"IdentityToken":{"description":"An opaque token used to authenticate a user after a successful login","type":"string","x-nullable":false},"Status":{"description":"The status of the authentication","type":"string","x-nullable":false}}},"examples":{"application/json":{"IdentityToken":"9cbaf023786cd7...","Status":"Login Succeeded"}}},"204":{"description":"No error"},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/auth/login":{"post":{"description":"Submit a Login Form in exchange for a Session Token.","consumes":["application/json"],"tags":["UCP"],"summary":"Submit a Login Form in exchange for a Session Token.","operationId":"Login","parameters":[{"type":"auth.Credentials","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/auth.Credentials"}}],"responses":{"200":{"description":"Success, login response returned.","schema":{"$ref":"#/definitions/auth.LoginResponse"}},"401":{"description":"Invalid username or 
password."},"default":{"description":"Success, login response returned.","schema":{"$ref":"#/definitions/auth.LoginResponse"}}}}},"/build":{"post":{"description":"Build an image from a tar archive with a `Dockerfile` in it.\n\nThe `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/).\n\nThe Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output.\n\nThe build is canceled if the client drops the connection by quitting or being killed.\n","consumes":["application/octet-stream"],"produces":["application/json"],"tags":["Image"],"summary":"Build an image","operationId":"ImageBuild","parameters":[{"description":"A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz.","name":"inputStream","in":"body","schema":{"type":"string","format":"binary"}},{"type":"string","default":"Dockerfile","description":"Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`.","name":"dockerfile","in":"query"},{"type":"string","description":"A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters.","name":"t","in":"query"},{"type":"string","description":"Extra hosts to add to /etc/hosts","name":"extrahosts","in":"query"},{"type":"string","description":"A Git repository URI or HTTP/HTTPS context URI. 
If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball.","name":"remote","in":"query"},{"type":"boolean","default":false,"description":"Suppress verbose build output.","name":"q","in":"query"},{"type":"boolean","default":false,"description":"Do not use the cache when building the image.","name":"nocache","in":"query"},{"type":"string","description":"JSON array of images used for build cache resolution.","name":"cachefrom","in":"query"},{"type":"string","description":"Attempt to pull the image even if an older image exists locally.","name":"pull","in":"query"},{"type":"boolean","default":true,"description":"Remove intermediate containers after a successful build.","name":"rm","in":"query"},{"type":"boolean","default":false,"description":"Always remove intermediate containers, even upon failure.","name":"forcerm","in":"query"},{"type":"integer","description":"Set memory limit for build.","name":"memory","in":"query"},{"type":"integer","description":"Total memory (memory + swap). Set as `-1` to disable swap.","name":"memswap","in":"query"},{"type":"integer","description":"CPU shares (relative weight).","name":"cpushares","in":"query"},{"type":"string","description":"CPUs in which to allow execution (e.g., `0-3`, `0,1`).","name":"cpusetcpus","in":"query"},{"type":"integer","description":"The length of a CPU period in microseconds.","name":"cpuperiod","in":"query"},{"type":"integer","description":"Microseconds of CPU time that the container can get in a CPU period.","name":"cpuquota","in":"query"},{"type":"string","description":"JSON map of string pairs for build-time variables. 
Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values.\n\nFor example, the build arg `FOO=bar` would become `{\"FOO\":\"bar\"}` in JSON. This would result in the query parameter `buildargs={\"FOO\":\"bar\"}`. Note that `{\"FOO\":\"bar\"}` should be URI component encoded.\n\n[Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)\n","name":"buildargs","in":"query"},{"type":"integer","description":"Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB.","name":"shmsize","in":"query"},{"type":"boolean","description":"Squash the resulting images layers into a single layer. *(Experimental release only.)*","name":"squash","in":"query"},{"type":"string","description":"Arbitrary key/value labels to set on the image, as a JSON map of string pairs.","name":"labels","in":"query"},{"type":"string","description":"Sets the networking mode for the run commands during build. Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken as a custom network's name to which this container should connect to.","name":"networkmode","in":"query"},{"enum":["application/x-tar"],"type":"string","default":"application/x-tar","name":"Content-type","in":"header"},{"type":"string","description":"This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to.\n\nThe key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). 
For example:\n\n```\n{\n \"docker.example.com\": {\n \"username\": \"janedoe\",\n \"password\": \"hunter2\"\n },\n \"https://index.docker.io/v1/\": {\n \"username\": \"mobydock\",\n \"password\": \"conta1n3rize14\"\n }\n}\n```\n\nOnly the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API.\n","name":"X-Registry-Config","in":"header"},{"type":"string","default":"","description":"Platform in the format os[/arch[/variant]]","name":"platform","in":"query"},{"type":"string","default":"","description":"Target build stage","name":"target","in":"query"}],"responses":{"200":{"description":"no error"},"400":{"description":"Bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/collectionByPath":{"get":{"description":"Retrieve a single collection by path.","produces":["application/json"],"tags":["UCP"],"summary":"Retrieve a single collection by path.","operationId":"Get Collection by path","parameters":[{"type":"string","default":"","description":"Path of the collection to get.","name":"path","in":"query"}],"responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/authz.Collection"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/authz.Collection"}}}}},"/collectionGrants":{"get":{"description":"Lists all collection grants","produces":["application/json"],"tags":["UCP"],"summary":"Lists all collection grants","operationId":"ListGrants","parameters":[{"type":"string","default":"","description":"Filter grants by subjectID. Only a single value may be specified for this query parameter. 
A subjectID may be an account ID for a user or organization, or a team ID.","name":"subjectID","in":"query"},{"type":"string","default":"","description":"Filter grants by collection ID. Only a single value may be specified for this query parameter.","name":"objectID","in":"query"},{"type":"string","default":"","description":"Filter grants by roleID. Only a single value may be specified for this query parameter.","name":"roleID","in":"query"},{"type":"string","default":"all","description":"Filter grants by a subject type - either \"agent\", \"all\" (default), \"anonymous\", \"authenticated\", \"org\", \"team\", or \"user\" . These filters cannot be combined in any way.","name":"subjectType","in":"query"},{"type":"boolean","default":"false","description":"Expands the subject into a list of subjects that it belongs to.","name":"expandUser","in":"query"},{"type":"boolean","default":"false","description":"Include the collection paths in the response.","name":"showPaths","in":"query"},{"type":"string","default":"","description":"Only return grants with an order marker starting from this value.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of grants per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/responses.Grants"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/responses.Grants"}}}}},"/collectionGrants/{subjectID}/{objectID}/{roleID}":{"put":{"description":"Creates a collection grant","tags":["UCP"],"summary":"Creates a collection grant","operationId":"CreateGrant","parameters":[{"type":"string","default":"","description":"SubjectID of grant to create. 
For a service account, it should follow the format `system:serviceaccount::`","name":"subjectID","in":"path","required":true},{"type":"string","default":"","description":"ObjectID of grant to create","name":"objectID","in":"path","required":true},{"type":"string","default":"","description":"RoleID of grant to create","name":"roleID","in":"path","required":true}],"responses":{"201":{"description":"Success"}}},"delete":{"description":"Deletes a collection grant.","tags":["UCP"],"summary":"Deletes a collection grant.","operationId":"DeleteGrant","parameters":[{"type":"string","default":"","description":"SubjectID of grant to delete","name":"subjectID","in":"path","required":true},{"type":"string","default":"","description":"ObjectID of grant to delete","name":"objectID","in":"path","required":true},{"type":"string","default":"","description":"RoleID of grant to delete","name":"roleID","in":"path","required":true}],"responses":{"204":{"description":"Success"}}}},"/collections":{"get":{"description":"List all visible collections.","produces":["application/json"],"tags":["UCP"],"summary":"List all visible collections.","operationId":"List collections","parameters":[{"type":"string","default":"","description":"Only return collections with an order marker starting from this value.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of collections per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/authz.Collection"}}},"default":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/authz.Collection"}}}}},"post":{"description":"Create a new collection of resources that share mutual authorization settings.","consumes":["application/json"],"produces":["application/json"],"tags":["UCP"],"summary":"Create a new collection of resources that share mutual authorization settings.","operationId":"Create 
Collection","parameters":[{"type":"authz.CollectionCreate","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/authz.CollectionCreate"}}],"responses":{"201":{"description":"Success","schema":{"$ref":"#/definitions/authz.CollectionCreateResponse"}}}}},"/collections/{id}":{"get":{"description":"Retrieve a single collection by ID.","produces":["application/json"],"tags":["UCP"],"summary":"Retrieve a single collection by ID.","operationId":"Get Collection","parameters":[{"type":"string","default":"","description":"ID of the collection to get","name":"id","in":"path","required":true}],"responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/authz.Collection"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/authz.Collection"}}}},"delete":{"description":"Delete a single collection by ID.","tags":["UCP"],"summary":"Delete a single collection by ID.","operationId":"Delete Collection","parameters":[{"type":"string","default":"","description":"ID of the collection to delete.","name":"id","in":"path","required":true}],"responses":{"204":{"description":"Success"}}},"patch":{"description":"Updates an existing collection","consumes":["application/json"],"tags":["UCP"],"summary":"Updates an existing collection","operationId":"Update Collection","parameters":[{"type":"string","default":"","description":"ID of the collection to update.","name":"id","in":"path","required":true},{"type":"authz.CollectionUpdate","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/authz.CollectionUpdate"}}],"responses":{"200":{"description":"Success"},"default":{"description":"Success"}}}},"/collections/{id}/children":{"get":{"description":"Retrieve all children collection to a specific collection.","produces":["application/json"],"tags":["UCP"],"summary":"Retrieve all children collection to a specific collection.","operationId":"Get Collection Children","parameters":[{"type":"string","default":"","description":"ID of the 
collection whose children will be returned","name":"id","in":"path","required":true},{"type":"string","default":"","description":"Only return collections with an order marker starting from this value.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of collections per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/authz.Collection"}}},"default":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/authz.Collection"}}}}}},"/commit":{"post":{"consumes":["application/json"],"produces":["application/json"],"tags":["Image"],"summary":"Create a new image from a container","operationId":"ImageCommit","parameters":[{"description":"The container configuration","name":"containerConfig","in":"body","schema":{"$ref":"#/definitions/ContainerConfig"}},{"type":"string","description":"The ID or name of the container to commit","name":"container","in":"query"},{"type":"string","description":"Repository name for the created image","name":"repo","in":"query"},{"type":"string","description":"Tag name for the created image","name":"tag","in":"query"},{"type":"string","description":"Commit message","name":"comment","in":"query"},{"type":"string","description":"Author of the image (e.g., `John Hannibal Smith `)","name":"author","in":"query"},{"type":"boolean","default":true,"description":"Whether to pause the container before committing","name":"pause","in":"query"},{"type":"string","description":"`Dockerfile` instructions to apply while committing","name":"changes","in":"query"}],"responses":{"201":{"description":"no error","schema":{"$ref":"#/definitions/IdResponse"}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server 
error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/configs":{"get":{"produces":["application/json"],"tags":["Config"],"summary":"List configs","operationId":"ConfigList","parameters":[{"type":"string","description":"A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters:\n\n- `id=`\n- `label= or label==value`\n- `name=`\n- `names=`\n","name":"filters","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"array","items":{"$ref":"#/definitions/Config"},"example":[{"CreatedAt":"2016-11-05T01:20:17.327670065Z","ID":"ktnbjxoalbkvbvedmg1urrz8h","Spec":{"Name":"server.conf"},"UpdatedAt":"2016-11-05T01:20:17.327670065Z","Version":{"Index":11}}]}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/configs/create":{"post":{"description":"If you create a UCP config with a name that starts with `com.docker.ucp.config`, UCP verifies that the config is valid before saving it. 
Also, UCP validates any licenses with names that start with `com.docker.license`.","consumes":["application/json"],"produces":["application/json"],"tags":["Config"],"summary":"Create a config","operationId":"ConfigCreate","parameters":[{"name":"body","in":"body","schema":{"allOf":[{"$ref":"#/definitions/ConfigSpec"},{"type":"object","example":{"Data":"VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==","Labels":{"foo":"bar"},"Name":"server.conf"}}]}}],"responses":{"201":{"description":"no error","schema":{"$ref":"#/definitions/IdResponse"}},"409":{"description":"name conflicts with an existing object","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/configs/{id}":{"get":{"produces":["application/json"],"tags":["Config"],"summary":"Inspect a config","operationId":"ConfigInspect","parameters":[{"type":"string","description":"ID of the config","name":"id","in":"path","required":true}],"responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/Config"},"examples":{"application/json":{"CreatedAt":"2016-11-05T01:20:17.327670065Z","ID":"ktnbjxoalbkvbvedmg1urrz8h","Spec":{"Name":"app-dev.crt"},"UpdatedAt":"2016-11-05T01:20:17.327670065Z","Version":{"Index":11}}}},"404":{"description":"config not found","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}},"delete":{"produces":["application/json"],"tags":["Config"],"summary":"Delete a config","operationId":"ConfigDelete","parameters":[{"type":"string","description":"ID of the config","name":"id","in":"path","required":true}],"responses":{"204":{"description":"no error"},"404":{"description":"config not 
found","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/configs/{id}/update":{"post":{"tags":["Config"],"summary":"Update a Config","operationId":"ConfigUpdate","parameters":[{"type":"string","description":"The ID or name of the config","name":"id","in":"path","required":true},{"description":"The spec of the config to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values.","name":"body","in":"body","schema":{"$ref":"#/definitions/ConfigSpec"}},{"type":"integer","format":"int64","description":"The version number of the config object being updated. This is required to avoid conflicting writes.","name":"version","in":"query","required":true}],"responses":{"200":{"description":"no error"},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such config","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/create":{"post":{"consumes":["application/json","application/octet-stream"],"produces":["application/json"],"tags":["Container"],"summary":"Create a container","operationId":"ContainerCreate","parameters":[{"pattern":"/?[a-zA-Z0-9_-]+","type":"string","description":"Assign the specified name to the container. 
Must match `/?[a-zA-Z0-9_-]+`.","name":"name","in":"query"},{"description":"Container to create","name":"body","in":"body","required":true,"schema":{"allOf":[{"$ref":"#/definitions/ContainerConfig","example":{"AttachStderr":true,"AttachStdin":false,"AttachStdout":true,"Cmd":["date"],"Domainname":"","Entrypoint":"","Env":["FOO=bar","BAZ=quux"],"ExposedPorts":{"22/tcp":{}},"HostConfig":{"AutoRemove":true,"Binds":["/tmp:/tmp"],"BlkioDeviceReadBps":[{}],"BlkioDeviceReadIOps":[{}],"BlkioDeviceWriteBps":[{}],"BlkioDeviceWriteIOps":[{}],"BlkioWeight":300,"BlkioWeightDevice":[{}],"CapAdd":["NET_ADMIN"],"CapDrop":["MKNOD"],"CgroupParent":"","CpuPercent":80,"CpuPeriod":100000,"CpuQuota":50000,"CpuRealtimePeriod":1000000,"CpuRealtimeRuntime":10000,"CpuShares":512,"CpusetCpus":"0,1","CpusetMems":"0,1","Devices":[],"Dns":["8.8.8.8"],"DnsOptions":[""],"DnsSearch":[""],"GroupAdd":["newgroup"],"KernelMemory":0,"Links":["redis3:redis"],"LogConfig":{"Config":{},"Type":"json-file"},"MaximumIOBps":0,"MaximumIOps":0,"Memory":0,"MemoryReservation":0,"MemorySwap":0,"MemorySwappiness":60,"NanoCPUs":500000,"NetworkMode":"bridge","OomKillDisable":false,"OomScoreAdj":500,"PidMode":"","PidsLimit":-1,"PortBindings":{"22/tcp":[{"HostPort":"11022"}]},"Privileged":false,"PublishAllPorts":false,"ReadonlyRootfs":false,"RestartPolicy":{"MaximumRetryCount":0,"Name":""},"SecurityOpt":[],"ShmSize":67108864,"StorageOpt":{},"Ulimits":[{}],"VolumeDriver":"","VolumesFrom":["parent","other:ro"]},"Hostname":"","Image":"ubuntu","Labels":{"com.example.license":"GPL","com.example.vendor":"Acme","com.example.version":"1.0"},"MacAddress":"12:34:56:78:9a:bc","NetworkDisabled":false,"NetworkingConfig":{"EndpointsConfig":{"isolated_nw":{"Aliases":["server_x","server_y"],"IPAMConfig":{"IPv4Address":"172.20.30.33","IPv6Address":"2001:db8:abcd::3033","LinkLocalIPs":["169.254.34.68","fe80::3468"]},"Links":["container_1","container_2"]}}},"OpenStdin":false,"StdinOnce":false,"StopSignal":"SIGTERM","StopTimeout":10,"Tty":fa
lse,"User":"","Volumes":{"/volumes/data":{}},"WorkingDir":""}},{"type":"object","properties":{"HostConfig":{"$ref":"#/definitions/HostConfig"},"NetworkingConfig":{"description":"This container's networking configuration.","type":"object","properties":{"EndpointsConfig":{"description":"A mapping of network name to endpoint configuration for that network.","type":"object","additionalProperties":{"$ref":"#/definitions/EndpointSettings"}}}}},"example":{"AttachStderr":true,"AttachStdin":false,"AttachStdout":true,"Cmd":["date"],"Domainname":"","Entrypoint":"","Env":["FOO=bar","BAZ=quux"],"ExposedPorts":{"22/tcp":{}},"HostConfig":{"AutoRemove":true,"Binds":["/tmp:/tmp"],"BlkioDeviceReadBps":[{}],"BlkioDeviceReadIOps":[{}],"BlkioDeviceWriteBps":[{}],"BlkioDeviceWriteIOps":[{}],"BlkioWeight":300,"BlkioWeightDevice":[{}],"CapAdd":["NET_ADMIN"],"CapDrop":["MKNOD"],"CgroupParent":"","CpuPercent":80,"CpuPeriod":100000,"CpuQuota":50000,"CpuRealtimePeriod":1000000,"CpuRealtimeRuntime":10000,"CpuShares":512,"CpusetCpus":"0,1","CpusetMems":"0,1","Devices":[],"Dns":["8.8.8.8"],"DnsOptions":[""],"DnsSearch":[""],"GroupAdd":["newgroup"],"KernelMemory":0,"Links":["redis3:redis"],"LogConfig":{"Config":{},"Type":"json-file"},"MaximumIOBps":0,"MaximumIOps":0,"Memory":0,"MemoryReservation":0,"MemorySwap":0,"MemorySwappiness":60,"NanoCPUs":500000,"NetworkMode":"bridge","OomKillDisable":false,"OomScoreAdj":500,"PidMode":"","PidsLimit":-1,"PortBindings":{"22/tcp":[{"HostPort":"11022"}]},"Privileged":false,"PublishAllPorts":false,"ReadonlyRootfs":false,"RestartPolicy":{"MaximumRetryCount":0,"Name":""},"SecurityOpt":[],"ShmSize":67108864,"StorageOpt":{},"Ulimits":[{}],"VolumeDriver":"","VolumesFrom":["parent","other:ro"]},"Hostname":"","Image":"ubuntu","Labels":{"com.example.license":"GPL","com.example.vendor":"Acme","com.example.version":"1.0"},"MacAddress":"12:34:56:78:9a:bc","NetworkDisabled":false,"NetworkingConfig":{"EndpointsConfig":{"isolated_nw":{"Aliases":["server_x","server_y"],"IPAMCo
nfig":{"IPv4Address":"172.20.30.33","IPv6Address":"2001:db8:abcd::3033","LinkLocalIPs":["169.254.34.68","fe80::3468"]},"Links":["container_1","container_2"]}}},"OpenStdin":false,"StdinOnce":false,"StopSignal":"SIGTERM","StopTimeout":10,"Tty":false,"User":"","Volumes":{"/volumes/data":{}},"WorkingDir":""}}],"example":{"AttachStderr":true,"AttachStdin":false,"AttachStdout":true,"Cmd":["date"],"Domainname":"","Entrypoint":"","Env":["FOO=bar","BAZ=quux"],"ExposedPorts":{"22/tcp":{}},"HostConfig":{"AutoRemove":true,"Binds":["/tmp:/tmp"],"BlkioDeviceReadBps":[{}],"BlkioDeviceReadIOps":[{}],"BlkioDeviceWriteBps":[{}],"BlkioDeviceWriteIOps":[{}],"BlkioWeight":300,"BlkioWeightDevice":[{}],"CapAdd":["NET_ADMIN"],"CapDrop":["MKNOD"],"CgroupParent":"","CpuPercent":80,"CpuPeriod":100000,"CpuQuota":50000,"CpuRealtimePeriod":1000000,"CpuRealtimeRuntime":10000,"CpuShares":512,"CpusetCpus":"0,1","CpusetMems":"0,1","Devices":[],"Dns":["8.8.8.8"],"DnsOptions":[""],"DnsSearch":[""],"GroupAdd":["newgroup"],"KernelMemory":0,"Links":["redis3:redis"],"LogConfig":{"Config":{},"Type":"json-file"},"MaximumIOBps":0,"MaximumIOps":0,"Memory":0,"MemoryReservation":0,"MemorySwap":0,"MemorySwappiness":60,"NanoCPUs":500000,"NetworkMode":"bridge","OomKillDisable":false,"OomScoreAdj":500,"PidMode":"","PidsLimit":-1,"PortBindings":{"22/tcp":[{"HostPort":"11022"}]},"Privileged":false,"PublishAllPorts":false,"ReadonlyRootfs":false,"RestartPolicy":{"MaximumRetryCount":0,"Name":""},"SecurityOpt":[],"ShmSize":67108864,"StorageOpt":{},"Ulimits":[{}],"VolumeDriver":"","VolumesFrom":["parent","other:ro"]},"Hostname":"","Image":"ubuntu","Labels":{"com.example.license":"GPL","com.example.vendor":"Acme","com.example.version":"1.0"},"MacAddress":"12:34:56:78:9a:bc","NetworkDisabled":false,"NetworkingConfig":{"EndpointsConfig":{"isolated_nw":{"Aliases":["server_x","server_y"],"IPAMConfig":{"IPv4Address":"172.20.30.33","IPv6Address":"2001:db8:abcd::3033","LinkLocalIPs":["169.254.34.68","fe80::3468"]},"Links":["conta
iner_1","container_2"]}}},"OpenStdin":false,"StdinOnce":false,"StopSignal":"SIGTERM","StopTimeout":10,"Tty":false,"User":"","Volumes":{"/volumes/data":{}},"WorkingDir":""}}}],"responses":{"201":{"description":"Container created successfully","schema":{"description":"OK response to ContainerCreate operation","type":"object","title":"ContainerCreateResponse","required":["Id","Warnings"],"properties":{"Id":{"description":"The ID of the created container","type":"string","x-nullable":false},"Warnings":{"description":"Warnings encountered when creating the container","type":"array","items":{"type":"string"},"x-nullable":false}}},"examples":{"application/json":{"Id":"e90e34656806","Warnings":[]}}},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"409":{"description":"conflict","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/json":{"get":{"description":"Returns a list of containers. For details on the format, see [the inspect endpoint](#operation/ContainerInspect).\n\nNote that it uses a different, smaller representation of a container than inspecting a single container. For example,\nthe list of linked containers is not propagated .\n","produces":["application/json"],"tags":["Container"],"summary":"List containers","operationId":"ContainerList","parameters":[{"type":"boolean","default":false,"description":"Return all containers. 
By default, only running containers are shown","name":"all","in":"query"},{"type":"integer","description":"Return this number of most recently created containers, including non-running ones.","name":"limit","in":"query"},{"type":"boolean","default":false,"description":"Return the size of container as fields `SizeRw` and `SizeRootFs`.","name":"size","in":"query"},{"type":"string","description":"Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{\"status\": [\"paused\"]}` will only return paused containers. Available filters:\n\n- `ancestor`=(`[:]`, ``, or ``)\n- `before`=(`` or ``)\n- `expose`=(`[/]`|`/[]`)\n- `exited=` containers with exit code of ``\n- `health`=(`starting`|`healthy`|`unhealthy`|`none`)\n- `id=` a container's ID\n- `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)\n- `is-task=`(`true`|`false`)\n- `label=key` or `label=\"key=value\"` of a container label\n- `name=` a container's name\n- `network`=(`` or ``)\n- `publish`=(`[/]`|`/[]`)\n- `since`=(`` or ``)\n- `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`)\n- `volume`=(`` or ``)\n","name":"filters","in":"query"}],"responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/ContainerSummary"},"examples":{"application/json":[{"Command":"echo 
1","Created":1367854155,"HostConfig":{"NetworkMode":"default"},"Id":"8dfafdbc3a40","Image":"ubuntu:latest","ImageID":"d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82","Labels":{"com.example.license":"GPL","com.example.vendor":"Acme","com.example.version":"1.0"},"Mounts":[{"Destination":"/data","Driver":"local","Mode":"ro,Z","Name":"fac362...80535","Propagation":"","RW":false,"Source":"/data"}],"Names":["/boring_feynman"],"NetworkSettings":{"Networks":{"bridge":{"EndpointID":"2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f","Gateway":"172.17.0.1","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"IPAddress":"172.17.0.2","IPPrefixLen":16,"IPv6Gateway":"","MacAddress":"02:42:ac:11:00:02","NetworkID":"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"}}},"Ports":[{"PrivatePort":2222,"PublicPort":3333,"Type":"tcp"}],"SizeRootFs":0,"SizeRw":12288,"State":"Exited","Status":"Exit 0"},{"Command":"echo 222222","Created":1367854155,"HostConfig":{"NetworkMode":"default"},"Id":"9cd87474be90","Image":"ubuntu:latest","ImageID":"d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82","Labels":{},"Mounts":[],"Names":["/coolName"],"NetworkSettings":{"Networks":{"bridge":{"EndpointID":"88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a","Gateway":"172.17.0.1","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"IPAddress":"172.17.0.8","IPPrefixLen":16,"IPv6Gateway":"","MacAddress":"02:42:ac:11:00:08","NetworkID":"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"}}},"Ports":[],"SizeRootFs":0,"SizeRw":12288,"State":"Exited","Status":"Exit 0"},{"Command":"echo 
3333333333333333","Created":1367854154,"HostConfig":{"NetworkMode":"default"},"Id":"3176a2479c92","Image":"ubuntu:latest","ImageID":"d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82","Labels":{},"Mounts":[],"Names":["/sleepy_dog"],"NetworkSettings":{"Networks":{"bridge":{"EndpointID":"8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d","Gateway":"172.17.0.1","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"IPAddress":"172.17.0.6","IPPrefixLen":16,"IPv6Gateway":"","MacAddress":"02:42:ac:11:00:06","NetworkID":"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"}}},"Ports":[],"SizeRootFs":0,"SizeRw":12288,"State":"Exited","Status":"Exit 0"},{"Command":"echo 444444444444444444444444444444444","Created":1367854152,"HostConfig":{"NetworkMode":"default"},"Id":"4cb07b47f9fb","Image":"ubuntu:latest","ImageID":"d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82","Labels":{},"Mounts":[],"Names":["/running_cat"],"NetworkSettings":{"Networks":{"bridge":{"EndpointID":"d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9","Gateway":"172.17.0.1","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"IPAddress":"172.17.0.5","IPPrefixLen":16,"IPv6Gateway":"","MacAddress":"02:42:ac:11:00:05","NetworkID":"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"}}},"Ports":[],"SizeRootFs":0,"SizeRw":12288,"State":"Exited","Status":"Exit 0"}]}},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/prune":{"post":{"produces":["application/json"],"tags":["Container"],"summary":"Delete stopped containers","operationId":"ContainerPrune","parameters":[{"type":"string","description":"Filters to process on the prune list, encoded as JSON (a `map[string][]string`).\n\nAvailable filters:\n- `until=` Prune containers created before this timestamp. 
The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.\n- `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels.\n","name":"filters","in":"query"}],"responses":{"200":{"description":"No error","schema":{"type":"object","title":"ContainerPruneResponse","properties":{"ContainersDeleted":{"description":"Container IDs that were deleted","type":"array","items":{"type":"string"}},"SpaceReclaimed":{"description":"Disk space reclaimed in bytes","type":"integer","format":"int64"}}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}":{"delete":{"tags":["Container"],"summary":"Remove a container","operationId":"ContainerDelete","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"boolean","default":false,"description":"Remove the volumes associated with the container.","name":"v","in":"query"},{"type":"boolean","default":false,"description":"If the container is running, kill it before removing it.","name":"force","in":"query"},{"type":"boolean","default":false,"description":"Remove the specified link associated with the container.","name":"link","in":"query"}],"responses":{"204":{"description":"no error"},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"409":{"description":"conflict","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"You cannot remove a running container: c2ada9df5af8. 
Stop the container before attempting removal or force remove"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/archive":{"get":{"description":"Get a tar archive of a resource in the filesystem of container id.","produces":["application/x-tar"],"tags":["Container"],"summary":"Get an archive of a filesystem resource in a container","operationId":"ContainerArchive","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"string","description":"Resource in the container’s filesystem to archive.","name":"path","in":"query","required":true}],"responses":{"200":{"description":"no error"},"400":{"description":"Bad parameter","schema":{"allOf":[{"$ref":"#/definitions/ErrorResponse"},{"type":"object","properties":{"message":{"description":"The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file).","type":"string","x-nullable":false}}}]}},"404":{"description":"Container or path does not exist","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}},"put":{"description":"Upload a tar archive to be extracted to a path in the filesystem of container id.","consumes":["application/x-tar","application/octet-stream"],"tags":["Container"],"summary":"Extract an archive of files or folders to a directory in a container","operationId":"PutContainerArchive","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"string","description":"Path to a directory in the container to extract the archive’s contents into. 
","name":"path","in":"query","required":true},{"type":"string","description":"If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa.","name":"noOverwriteDirNonDir","in":"query"},{"description":"The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz.","name":"inputStream","in":"body","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"The content was extracted successfully"},"400":{"description":"Bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"403":{"description":"Permission denied, the volume or container rootfs is marked as read-only.","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"No such container or path does not exist inside the container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}},"head":{"description":"A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path.","tags":["Container"],"summary":"Get information about files in a container","operationId":"ContainerArchiveInfo","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"string","description":"Resource in the container’s filesystem to archive.","name":"path","in":"query","required":true}],"responses":{"200":{"description":"no error","headers":{"X-Docker-Container-Path-Stat":{"type":"string","description":"A base64 - encoded JSON object with some filesystem header information about the path"}}},"400":{"description":"Bad 
parameter","schema":{"allOf":[{"$ref":"#/definitions/ErrorResponse"},{"type":"object","properties":{"message":{"description":"The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file).","type":"string","x-nullable":false}}}]}},"404":{"description":"Container or path does not exist","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/attach":{"post":{"description":"Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached.\n\nEither the `stream` or `logs` parameter must be `true` for this endpoint to do anything.\n\nSee [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details.\n\n### Hijacking\n\nThis endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket.\n\nThis is the response from the daemon for an attach request:\n\n```\nHTTP/1.1 200 OK\nContent-Type: application/vnd.docker.raw-stream\n\n[STREAM]\n```\n\nAfter the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server.\n\nTo hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers.\n\nFor example, the client sends this request to upgrade the connection:\n\n```\nPOST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1\nUpgrade: tcp\nConnection: Upgrade\n```\n\nThe Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream:\n\n```\nHTTP/1.1 101 UPGRADED\nContent-Type: application/vnd.docker.raw-stream\nConnection: 
Upgrade\nUpgrade: tcp\n\n[STREAM]\n```\n\n### Stream format\n\nWhen the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload.\n\nThe header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`).\n\nIt is encoded on the first eight bytes like this:\n\n```go\nheader := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}\n```\n\n`STREAM_TYPE` can be:\n\n- 0: `stdin` (is written on `stdout`)\n- 1: `stdout`\n- 2: `stderr`\n\n`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian.\n\nFollowing the header is the payload, which is the specified number of bytes of `STREAM_TYPE`.\n\nThe simplest way to implement this protocol is the following:\n\n1. Read 8 bytes.\n2. Choose `stdout` or `stderr` depending on the first byte.\n3. Extract the frame size from the last four bytes.\n4. Read the extracted size and output it on the correct output.\n5. Goto 1.\n\n### Stream format when using a TTY\n\nWhen the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. 
The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`.\n","produces":["application/vnd.docker.raw-stream"],"tags":["Container"],"summary":"Attach to a container","operationId":"ContainerAttach","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"string","description":"Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.","name":"detachKeys","in":"query"},{"type":"boolean","default":false,"description":"Replay previous logs from the container.\n\nThis is useful for attaching to a container that has started and you want to output everything since the container started.\n\nIf `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output.\n","name":"logs","in":"query"},{"type":"boolean","default":false,"description":"Stream attached streams from the time the request was made onwards","name":"stream","in":"query"},{"type":"boolean","default":false,"description":"Attach to `stdin`","name":"stdin","in":"query"},{"type":"boolean","default":false,"description":"Attach to `stdout`","name":"stdout","in":"query"},{"type":"boolean","default":false,"description":"Attach to `stderr`","name":"stderr","in":"query"}],"responses":{"101":{"description":"no error, hints proxy about hijacking"},"200":{"description":"no error, no upgrade header found"},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/changes":{"get":{"description":"Returns which files in a container's filesystem have 
been added, deleted,\nor modified. The `Kind` of modification can be one of:\n\n- `0`: Modified\n- `1`: Added\n- `2`: Deleted\n","produces":["application/json"],"tags":["Container"],"summary":"Get changes on a container’s filesystem","operationId":"ContainerChanges","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true}],"responses":{"200":{"description":"The list of changes","schema":{"type":"array","items":{"description":"change item in response to ContainerChanges operation","type":"object","title":"ContainerChangeResponseItem","required":["Path","Kind"],"properties":{"Kind":{"description":"Kind of change","type":"integer","format":"uint8","enum":[0,1,2],"x-nullable":false},"Path":{"description":"Path to file that has changed","type":"string","x-nullable":false}},"x-go-name":"ContainerChangeResponseItem"}},"examples":{"application/json":[{"Kind":0,"Path":"/dev"},{"Kind":1,"Path":"/dev/kmsg"},{"Kind":1,"Path":"/test"}]}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/exec":{"post":{"description":"Run a command inside a running container.","consumes":["application/json"],"produces":["application/json"],"tags":["Exec"],"summary":"Create an exec instance","operationId":"ContainerExec","parameters":[{"description":"Exec configuration","name":"execConfig","in":"body","required":true,"schema":{"type":"object","properties":{"AttachStderr":{"description":"Attach to `stderr` of the exec command.","type":"boolean"},"AttachStdin":{"description":"Attach to `stdin` of the exec command.","type":"boolean"},"AttachStdout":{"description":"Attach to `stdout` of the exec command.","type":"boolean"},"Cmd":{"description":"Command to run, as a string or array of 
strings.","type":"array","items":{"type":"string"}},"DetachKeys":{"description":"Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.","type":"string"},"Env":{"description":"A list of environment variables in the form `[\"VAR=value\", ...]`.","type":"array","items":{"type":"string"}},"Privileged":{"description":"Runs the exec process with extended privileges.","type":"boolean","default":false},"Tty":{"description":"Allocate a pseudo-TTY.","type":"boolean"},"User":{"description":"The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`.","type":"string"},"WorkingDir":{"description":"The working directory for the exec process inside the container.","type":"string"}},"example":{"AttachStderr":true,"AttachStdin":false,"AttachStdout":true,"Cmd":["date"],"DetachKeys":"ctrl-p,ctrl-q","Env":["FOO=bar","BAZ=quux"],"Tty":false}}},{"type":"string","description":"ID or name of container","name":"id","in":"path","required":true}],"responses":{"201":{"description":"no error","schema":{"$ref":"#/definitions/IdResponse"}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"409":{"description":"container is paused","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/export":{"get":{"description":"Export the contents of a container as a tarball.","produces":["application/octet-stream"],"tags":["Container"],"summary":"Export a container","operationId":"ContainerExport","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true}],"responses":{"200":{"description":"no error"},"404":{"description":"no such 
container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/json":{"get":{"description":"Return low-level information about a container.","produces":["application/json"],"tags":["Container"],"summary":"Inspect a container","operationId":"ContainerInspect","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"boolean","default":false,"description":"Return the size of container as fields `SizeRw` and `SizeRootFs`","name":"size","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"object","title":"ContainerInspectResponse","properties":{"AppArmorProfile":{"type":"string"},"Args":{"description":"The arguments to the command being run","type":"array","items":{"type":"string"}},"Config":{"$ref":"#/definitions/ContainerConfig"},"Created":{"description":"The time the container was created","type":"string"},"Driver":{"type":"string"},"ExecIDs":{"description":"IDs of exec instances that are running in the container.","type":"array","items":{"type":"string"},"x-nullable":true},"GraphDriver":{"$ref":"#/definitions/GraphDriverData"},"HostConfig":{"$ref":"#/definitions/HostConfig"},"HostnamePath":{"type":"string"},"HostsPath":{"type":"string"},"Id":{"description":"The ID of the container","type":"string"},"Image":{"description":"The container's image","type":"string"},"LogPath":{"type":"string"},"MountLabel":{"type":"string"},"Mounts":{"type":"array","items":{"$ref":"#/definitions/MountPoint"}},"Name":{"type":"string"},"NetworkSettings":{"$ref":"#/definitions/NetworkSettings"},"Node":{"description":"TODO","type":"object"},"Path":{"description":"The path to the command being 
run","type":"string"},"ProcessLabel":{"type":"string"},"ResolvConfPath":{"type":"string"},"RestartCount":{"type":"integer"},"SizeRootFs":{"description":"The total size of all the files in this container.","type":"integer","format":"int64"},"SizeRw":{"description":"The size of files that have been created or changed by this container.","type":"integer","format":"int64"},"State":{"description":"The state of the container.","type":"object","properties":{"Dead":{"type":"boolean"},"Error":{"type":"string"},"ExitCode":{"description":"The last exit code of this container","type":"integer"},"FinishedAt":{"description":"The time when this container last exited.","type":"string"},"OOMKilled":{"description":"Whether this container has been killed because it ran out of memory.","type":"boolean"},"Paused":{"description":"Whether this container is paused.","type":"boolean"},"Pid":{"description":"The process ID of this container","type":"integer"},"Restarting":{"description":"Whether this container is restarting.","type":"boolean"},"Running":{"description":"Whether this container is running.\n\nNote that a running container can be _paused_. The `Running` and `Paused`\nbooleans are not mutually exclusive:\n\nWhen pausing a container (on Linux), the cgroups freezer is used to suspend\nall processes in the container. Freezing the process requires the process to\nbe running. As a result, paused containers are both `Running` _and_ `Paused`.\n\nUse the `Status` field instead to determine if a container's state is \"running\".\n","type":"boolean"},"StartedAt":{"description":"The time when this container was last started.","type":"string"},"Status":{"description":"The status of the container. 
For example, `\"running\"` or `\"exited\"`.\n","type":"string","enum":["created","running","paused","restarting","removing","exited","dead"]}}}}},"examples":{"application/json":{"AppArmorProfile":"","Args":["-c","exit 9"],"Config":{"AttachStderr":true,"AttachStdin":false,"AttachStdout":true,"Cmd":["/bin/sh","-c","exit 9"],"Domainname":"","Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Hostname":"ba033ac44011","Image":"ubuntu","Labels":{"com.example.license":"GPL","com.example.vendor":"Acme","com.example.version":"1.0"},"MacAddress":"","NetworkDisabled":false,"OpenStdin":false,"StdinOnce":false,"StopSignal":"SIGTERM","StopTimeout":10,"Tty":false,"User":"","Volumes":{"/volumes/data":{}},"WorkingDir":""},"Created":"2015-01-06T15:47:31.485331387Z","Driver":"devicemapper","ExecIDs":["b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca","3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4"],"HostConfig":{"BlkioDeviceReadBps":[{}],"BlkioDeviceReadIOps":[{}],"BlkioDeviceWriteBps":[{}],"BlkioDeviceWriteIOps":[{}],"BlkioWeight":0,"BlkioWeightDevice":[{}],"ContainerIDFile":"","CpuPercent":80,"CpuPeriod":100000,"CpuRealtimePeriod":1000000,"CpuRealtimeRuntime":10000,"CpuShares":0,"CpusetCpus":"","CpusetMems":"","Devices":[],"IpcMode":"","KernelMemory":0,"LogConfig":{"Type":"json-file"},"LxcConf":[],"MaximumIOBps":0,"MaximumIOps":0,"Memory":0,"MemoryReservation":0,"MemorySwap":0,"NetworkMode":"bridge","OomKillDisable":false,"OomScoreAdj":500,"PidMode":"","PortBindings":{},"Privileged":false,"PublishAllPorts":false,"ReadonlyRootfs":false,"RestartPolicy":{"MaximumRetryCount":2,"Name":"on-failure"},"ShmSize":67108864,"Sysctls":{"net.ipv4.ip_forward":"1"},"Ulimits":[{}],"VolumeDriver":""},"HostnamePath":"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname","HostsPath":"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts","Id":"ba033ac44
01106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39","Image":"04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2","LogPath":"/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log","MountLabel":"","Mounts":[{"Destination":"/data","Driver":"local","Mode":"ro,Z","Name":"fac362...80535","Propagation":"","RW":false,"Source":"/data"}],"Name":"/boring_euclid","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","Networks":{"bridge":{"EndpointID":"7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d","Gateway":"172.17.0.1","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"IPAddress":"172.17.0.2","IPPrefixLen":16,"IPv6Gateway":"","MacAddress":"02:42:ac:12:00:02","NetworkID":"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"}},"SandboxID":"","SandboxKey":""},"Path":"/bin/sh","ProcessLabel":"","ResolvConfPath":"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf","RestartCount":1,"State":{"Dead":false,"Error":"","ExitCode":9,"FinishedAt":"2015-01-06T15:47:32.080254511Z","OOMKilled":false,"Paused":false,"Pid":0,"Restarting":false,"Running":true,"StartedAt":"2015-01-06T15:47:32.072697474Z","Status":"running"}}}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/kill":{"post":{"description":"Send a POSIX signal to a container, defaulting to killing to the container.","tags":["Container"],"summary":"Kill a 
container","operationId":"ContainerKill","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"string","default":"SIGKILL","description":"Signal to send to the container as an integer or string (e.g. `SIGINT`)","name":"signal","in":"query"}],"responses":{"204":{"description":"no error"},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"409":{"description":"container is not running","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/logs":{"get":{"description":"Get `stdout` and `stderr` logs from a container.\n\nNote: This endpoint works only for containers with the `json-file` or `journald` logging driver.\n","tags":["Container"],"summary":"Get container logs","operationId":"ContainerLogs","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"boolean","default":false,"description":"Return the logs as a stream.\n\nThis will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. 
For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).\n","name":"follow","in":"query"},{"type":"boolean","default":false,"description":"Return logs from `stdout`","name":"stdout","in":"query"},{"type":"boolean","default":false,"description":"Return logs from `stderr`","name":"stderr","in":"query"},{"type":"integer","default":0,"description":"Only return logs since this time, as a UNIX timestamp","name":"since","in":"query"},{"type":"integer","default":0,"description":"Only return logs before this time, as a UNIX timestamp","name":"until","in":"query"},{"type":"boolean","default":false,"description":"Add timestamps to every log line","name":"timestamps","in":"query"},{"type":"string","default":"all","description":"Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines.","name":"tail","in":"query"}],"responses":{"101":{"description":"logs returned as a stream","schema":{"type":"string","format":"binary"}},"200":{"description":"logs returned as a string in response body","schema":{"type":"string"}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/pause":{"post":{"description":"Use the cgroups freezer to suspend all processes in a container.\n\nTraditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. 
With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed.\n","tags":["Container"],"summary":"Pause a container","operationId":"ContainerPause","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true}],"responses":{"204":{"description":"no error"},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/rename":{"post":{"tags":["Container"],"summary":"Rename a container","operationId":"ContainerRename","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"string","description":"New name for the container","name":"name","in":"query","required":true}],"responses":{"204":{"description":"no error"},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"409":{"description":"name already in use","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/resize":{"post":{"description":"Resize the TTY for a container. 
You must restart the container for the resize to take effect.","consumes":["application/octet-stream"],"produces":["text/plain"],"tags":["Container"],"summary":"Resize a container TTY","operationId":"ContainerResize","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"integer","description":"Height of the tty session in characters","name":"h","in":"query"},{"type":"integer","description":"Width of the tty session in characters","name":"w","in":"query"}],"responses":{"200":{"description":"no error"},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"cannot resize container","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/restart":{"post":{"tags":["Container"],"summary":"Restart a container","operationId":"ContainerRestart","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"integer","description":"Number of seconds to wait before killing the container","name":"t","in":"query"}],"responses":{"204":{"description":"no error"},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/start":{"post":{"tags":["Container"],"summary":"Start a container","operationId":"ContainerStart","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"string","description":"Override the key sequence for detaching a container. 
Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.","name":"detachKeys","in":"query"}],"responses":{"204":{"description":"no error"},"304":{"description":"container already started","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/stats":{"get":{"description":"This endpoint returns a live stream of a container’s resource usage\nstatistics.\n\nThe `precpu_stats` is the CPU statistic of the *previous* read, and is\nused to calculate the CPU usage percentage. It is not an exact copy\nof the `cpu_stats` field.\n\nIf either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is\nnil then for compatibility with older daemons the length of the\ncorresponding `cpu_usage.percpu_usage` array should be used.\n","produces":["application/json"],"tags":["Container"],"summary":"Get container stats based on resource usage","operationId":"ContainerStats","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"boolean","default":true,"description":"Stream the output. 
If false, the stats will be output once and then it will disconnect.","name":"stream","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"object"},"examples":{"application/json":{"blkio_stats":{},"cpu_stats":{"cpu_usage":{"percpu_usage":[8646879,24472255,36438778,30657443],"total_usage":100215355,"usage_in_kernelmode":30000000,"usage_in_usermode":50000000},"online_cpus":4,"system_cpu_usage":739306590000000,"throttling_data":{"periods":0,"throttled_periods":0,"throttled_time":0}},"memory_stats":{"failcnt":0,"limit":67108864,"max_usage":6651904,"stats":{"active_anon":6537216,"active_file":0,"cache":0,"hierarchical_memory_limit":67108864,"inactive_anon":0,"inactive_file":0,"mapped_file":0,"pgfault":964,"pgmajfault":0,"pgpgin":477,"pgpgout":414,"rss":6537216,"rss_huge":6291456,"total_active_anon":6537216,"total_active_file":0,"total_cache":0,"total_inactive_anon":0,"total_inactive_file":0,"total_mapped_file":0,"total_pgfault":964,"total_pgmajfault":0,"total_pgpgin":477,"total_pgpgout":414,"total_rss":6537216,"total_rss_huge":6291456,"total_unevictable":0,"total_writeback":0,"unevictable":0,"writeback":0},"usage":6537216},"networks":{"eth0":{"rx_bytes":5338,"rx_dropped":0,"rx_errors":0,"rx_packets":36,"tx_bytes":648,"tx_dropped":0,"tx_errors":0,"tx_packets":8},"eth5":{"rx_bytes":4641,"rx_dropped":0,"rx_errors":0,"rx_packets":26,"tx_bytes":690,"tx_dropped":0,"tx_errors":0,"tx_packets":9}},"pids_stats":{"current":3},"precpu_stats":{"cpu_usage":{"percpu_usage":[8646879,24350896,36438778,30657443],"total_usage":100093996,"usage_in_kernelmode":30000000,"usage_in_usermode":50000000},"online_cpus":4,"system_cpu_usage":9492140000000,"throttling_data":{"periods":0,"throttled_periods":0,"throttled_time":0}},"read":"2015-01-08T22:57:31.547920715Z"}}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server 
error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/stop":{"post":{"tags":["Container"],"summary":"Stop a container","operationId":"ContainerStop","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"integer","description":"Number of seconds to wait before killing the container","name":"t","in":"query"}],"responses":{"204":{"description":"no error"},"304":{"description":"container already stopped","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/top":{"get":{"description":"On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows.","tags":["Container"],"summary":"List processes running inside a container","operationId":"ContainerTop","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"string","default":"-ef","description":"The arguments to pass to `ps`. 
For example, `aux`","name":"ps_args","in":"query"}],"responses":{"200":{"description":"no error","schema":{"description":"OK response to ContainerTop operation","type":"object","title":"ContainerTopResponse","properties":{"Processes":{"description":"Each process running in the container, where each is process is an array of values corresponding to the titles","type":"array","items":{"type":"array","items":{"type":"string"}}},"Titles":{"description":"The ps column titles","type":"array","items":{"type":"string"}}}},"examples":{"application/json":{"Processes":[["root","13642","882","0","17:03","pts/0","00:00:00","/bin/bash"],["root","13735","13642","0","17:06","pts/0","00:00:00","sleep 10"]],"Titles":["UID","PID","PPID","C","STIME","TTY","TIME","CMD"]}}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/unpause":{"post":{"description":"Resume a container which has been paused.","tags":["Container"],"summary":"Unpause a container","operationId":"ContainerUnpause","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true}],"responses":{"204":{"description":"no error"},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/wait":{"post":{"description":"Block until a container stops, then returns the exit code.","produces":["application/json"],"tags":["Container"],"summary":"Wait for a container","operationId":"ContainerWait","parameters":[{"type":"string","description":"ID or name of the 
container","name":"id","in":"path","required":true},{"type":"string","default":"not-running","description":"Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'.","name":"condition","in":"query"}],"responses":{"200":{"description":"The container has exit.","schema":{"description":"OK response to ContainerWait operation","type":"object","title":"ContainerWaitResponse","required":["StatusCode"],"properties":{"Error":{"description":"container waiting error, if any","type":"object","properties":{"Message":{"description":"Details of an error","type":"string"}}},"StatusCode":{"description":"Exit code of the container","type":"integer","x-nullable":false}}}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/defaultCollection/{userID}":{"get":{"description":"Retrieve a user's default collection.","produces":["application/json"],"tags":["UCP"],"summary":"Retrieve a user's default collection.","operationId":"Get user default collection","parameters":[{"type":"string","default":"","description":"ID of the user","name":"userID","in":"path","required":true}],"responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/authz.Collection"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/authz.Collection"}}}},"put":{"description":"Set a user's default collection.","consumes":["application/json"],"tags":["UCP"],"summary":"Set a user's default collection.","operationId":"Set user default collection","parameters":[{"type":"string","default":"","description":"ID of the 
user","name":"userID","in":"path","required":true},{"type":"authz.CollectionID","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/authz.CollectionID"}}],"responses":{"201":{"description":"Success"}}},"delete":{"description":"Delete the default collection setting for a user","tags":["UCP"],"summary":"Delete the default collection setting for a user","operationId":"DeleteUserDefaultCollection","parameters":[{"type":"string","default":"","description":"ID of the user whose default collection mapping will be deleted","name":"userID","in":"path","required":true}],"responses":{"204":{"description":"Success"}}}},"/defaultCollectionRole":{"get":{"description":"Retrieve the role for the logged-in user's default collection.","produces":["application/json"],"tags":["UCP"],"summary":"Retrieve the role for the logged-in user's default collection.","operationId":"Get the logged-in user's role for their default collection","responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/role.Role"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/role.Role"}}}}},"/distribution/{name}/json":{"get":{"description":"Return image digest and platform information by contacting the registry.","produces":["application/json"],"tags":["Distribution"],"summary":"Get image information from the registry","operationId":"DistributionInspect","parameters":[{"type":"string","description":"Image name or id","name":"name","in":"path","required":true}],"responses":{"200":{"description":"descriptor and platform information","schema":{"type":"object","title":"DistributionInspectResponse","required":["Descriptor","Platforms"],"properties":{"Descriptor":{"description":"A descriptor struct containing digest, media type, and size","type":"object","properties":{"Digest":{"type":"string"},"MediaType":{"type":"string"},"Size":{"type":"integer","format":"int64"},"URLs":{"type":"array","items":{"type":"string"}}}},"Platforms":{"description":"An array 
containing all platforms supported by the image","type":"array","items":{"type":"object","properties":{"Architecture":{"type":"string"},"Features":{"type":"array","items":{"type":"string"}},"OS":{"type":"string"},"OSFeatures":{"type":"array","items":{"type":"string"}},"OSVersion":{"type":"string"},"Variant":{"type":"string"}}}}},"x-go-name":"DistributionInspect"},"examples":{"application/json":{"Descriptor":{"Digest":"sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96","MediaType":"application/vnd.docker.distribution.manifest.v2+json","Size":3987495,"URLs":[""]},"Platforms":[{"Architecture":"amd64","Features":[""],"OS":"linux","OSFeatures":[""],"OSVersion":"","Variant":""}]}}},"401":{"description":"Failed authentication or no image found","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such image: someimage (tag: latest)"}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/events":{"get":{"description":"Stream real-time events from the server.\n\nVarious objects within Docker report events when something happens to them.\n\nContainers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, and `update`\n\nImages report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, and `untag`\n\nVolumes report these events: `create`, `mount`, `unmount`, and `destroy`\n\nNetworks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, and `remove`\n\nThe Docker daemon reports these events: `reload`\n\nServices report these events: `create`, `update`, and `remove`\n\nNodes report these events: `create`, `update`, and `remove`\n\nSecrets report these events: `create`, `update`, and `remove`\n\nConfigs report these events: `create`, 
`update`, and `remove`\n","produces":["application/json"],"tags":["System"],"summary":"Monitor events","operationId":"SystemEvents","parameters":[{"type":"string","description":"Show events created since this timestamp then stream new events.","name":"since","in":"query"},{"type":"string","description":"Show events created until this timestamp then stop streaming.","name":"until","in":"query"},{"type":"string","description":"A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters:\n\n- `config=` config name or ID\n- `container=` container name or ID\n- `daemon=` daemon name or ID\n- `event=` event type\n- `image=` image name or ID\n- `label=` image or container label\n- `network=` network name or ID\n- `node=` node ID\n- `plugin`= plugin name or ID\n- `scope`= local or swarm\n- `secret=` secret name or ID\n- `service=` service name or ID\n- `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config`\n- `volume=` volume name\n","name":"filters","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"object","title":"SystemEventsResponse","properties":{"Action":{"description":"The type of event","type":"string"},"Actor":{"type":"object","properties":{"Attributes":{"description":"Various key/value attributes of the object, depending on its type","type":"object","additionalProperties":{"type":"string"}},"ID":{"description":"The ID of the object emitting the event","type":"string"}}},"Type":{"description":"The type of object emitting the event","type":"string"},"time":{"description":"Timestamp of event","type":"integer"},"timeNano":{"description":"Timestamp of event, with nanosecond 
accuracy","type":"integer","format":"int64"}}},"examples":{"application/json":{"Action":"create","Actor":{"Attributes":{"com.example.some-label":"some-label-value","image":"alpine","name":"my-container"},"ID":"ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"},"Type":"container","time":1461943101}}},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/exec/{id}/json":{"get":{"description":"Return low-level information about an exec instance.","produces":["application/json"],"tags":["Exec"],"summary":"Inspect an exec instance","operationId":"ExecInspect","parameters":[{"type":"string","description":"Exec instance ID","name":"id","in":"path","required":true}],"responses":{"200":{"description":"No error","schema":{"type":"object","title":"ExecInspectResponse","properties":{"CanRemove":{"type":"boolean"},"ContainerID":{"type":"string"},"DetachKeys":{"type":"string"},"ExitCode":{"type":"integer"},"ID":{"type":"string"},"OpenStderr":{"type":"boolean"},"OpenStdin":{"type":"boolean"},"OpenStdout":{"type":"boolean"},"Pid":{"description":"The system process ID for the exec process.","type":"integer"},"ProcessConfig":{"$ref":"#/definitions/ProcessConfig"},"Running":{"type":"boolean"}}},"examples":{"application/json":{"CanRemove":false,"ContainerID":"b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126","DetachKeys":"","ExitCode":2,"ID":"f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b","OpenStderr":true,"OpenStdin":true,"OpenStdout":true,"Pid":42000,"ProcessConfig":{"arguments":["-c","exit 2"],"entrypoint":"sh","privileged":false,"tty":true,"user":"1000"},"Running":false}}},"404":{"description":"No such exec instance","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/exec/{id}/resize":{"post":{"description":"Resize the TTY 
session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance.","tags":["Exec"],"summary":"Resize an exec instance","operationId":"ExecResize","parameters":[{"type":"string","description":"Exec instance ID","name":"id","in":"path","required":true},{"type":"integer","description":"Height of the TTY session in characters","name":"h","in":"query"},{"type":"integer","description":"Width of the TTY session in characters","name":"w","in":"query"}],"responses":{"201":{"description":"No error"},"404":{"description":"No such exec instance","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/exec/{id}/start":{"post":{"description":"Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command.","consumes":["application/json"],"produces":["application/vnd.docker.raw-stream"],"tags":["Exec"],"summary":"Start an exec instance","operationId":"ExecStart","parameters":[{"name":"execStartConfig","in":"body","schema":{"type":"object","properties":{"Detach":{"description":"Detach from the command.","type":"boolean"},"Tty":{"description":"Allocate a pseudo-TTY.","type":"boolean"}},"example":{"Detach":false,"Tty":false}}},{"type":"string","description":"Exec instance ID","name":"id","in":"path","required":true}],"responses":{"200":{"description":"No error"},"404":{"description":"No such exec instance","schema":{"$ref":"#/definitions/ErrorResponse"}},"409":{"description":"Container is stopped or paused","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/id/":{"get":{"description":"Identify the Currently Authenticated Account.","produces":["application/json"],"tags":["Identity"],"summary":"Identify the Currently Authenticated Account.","operationId":"ID","responses":{"200":{"description":"Success, current account returned."},"default":{"description":"Success, current account 
returned."}}}},"/id/logout":{"post":{"description":"Delete the current session is use.","produces":["application/json"],"tags":["Identity"],"summary":"Delete the current session is use.","operationId":"Logout","responses":{"204":{"description":"Success, current session deleted."}}}},"/images/create":{"post":{"description":"Create an image by either pulling it from a registry or importing it.\n\nThe `create` request pulls the image onto every node in the swarm that exists at that time and that have the right operating system. Nodes that join the swarm later or that don't have the same base OS as the image won't get the image.","consumes":["text/plain","application/octet-stream"],"produces":["application/json"],"tags":["Image"],"summary":"Create an image","operationId":"ImageCreate","parameters":[{"type":"string","description":"Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed.","name":"fromImage","in":"query"},{"type":"string","description":"Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image.","name":"fromSrc","in":"query"},{"type":"string","description":"Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image.","name":"repo","in":"query"},{"type":"string","description":"Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled.","name":"tag","in":"query"},{"description":"Image content if the value `-` has been specified in fromSrc query parameter","name":"inputImage","in":"body","schema":{"type":"string"}},{"type":"string","description":"A base64-encoded auth configuration. 
[See the authentication section for details.](#section/Authentication)","name":"X-Registry-Auth","in":"header"},{"type":"string","default":"","description":"Platform in the format os[/arch[/variant]]","name":"platform","in":"query"}],"responses":{"200":{"description":"no error"},"404":{"description":"repository does not exist or no read access","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/get":{"get":{"description":"Get a tarball containing all images and metadata for several image repositories.\n\nFor each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID.\n\nFor details on the format, see [the export image endpoint](#operation/ImageGet).\n","produces":["application/x-tar"],"tags":["Image"],"summary":"Export several images","operationId":"ImageGetAll","parameters":[{"type":"array","items":{"type":"string"},"description":"Image names to filter by","name":"names","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"string","format":"binary"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/json":{"get":{"description":"Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image.\n\nThis endpoint returns the union of all images on all nodes in the cluster.","produces":["application/json"],"tags":["Image"],"summary":"List Images","operationId":"ImageList","parameters":[{"type":"boolean","default":false,"description":"Show all images. 
Only images from a final layer (no children) are shown by default.","name":"all","in":"query"},{"type":"string","description":"A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:\n\n- `before`=(`[:]`, `` or ``)\n- `dangling=true`\n- `label=key` or `label=\"key=value\"` of an image label\n- `reference`=(`[:]`)\n- `since`=(`[:]`, `` or ``)\n","name":"filters","in":"query"},{"type":"boolean","default":false,"description":"Show digest information as a `RepoDigests` field on each image.","name":"digests","in":"query"}],"responses":{"200":{"description":"Summary image data for the images matching the query","schema":{"type":"array","items":{"$ref":"#/definitions/ImageSummary"}},"examples":{"application/json":[{"Containers":2,"Created":1474925151,"Id":"sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8","Labels":{},"ParentId":"","RepoDigests":["ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787"],"RepoTags":["ubuntu:12.04","ubuntu:precise"],"SharedSize":0,"Size":103579269,"VirtualSize":103579269},{"Containers":5,"Created":1403128455,"Id":"sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175","Labels":{},"ParentId":"","RepoDigests":["ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7","ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3"],"RepoTags":["ubuntu:12.10","ubuntu:quantal"],"SharedSize":0,"Size":172064416,"VirtualSize":172064416}]}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/load":{"post":{"description":"Load a set of images and tags into a repository.\n\nFor details on the format, see [the export image endpoint](#operation/ImageGet).\n\n\nThe image is loaded on all nodes in the swarm that are compatible with the image's architecture. 
For example, Windows images aren't loaded on Linux nodes, and vice-versa.","consumes":["application/x-tar"],"produces":["application/json"],"tags":["Image"],"summary":"Import images","operationId":"ImageLoad","parameters":[{"description":"Tar archive containing images","name":"imagesTarball","in":"body","schema":{"type":"string","format":"binary"}},{"type":"boolean","default":false,"description":"Suppress progress details during load.","name":"quiet","in":"query"}],"responses":{"200":{"description":"no error"},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/prune":{"post":{"produces":["application/json"],"tags":["Image"],"summary":"Delete unused images","operationId":"ImagePrune","parameters":[{"type":"string","description":"Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters:\n\n- `dangling=` When set to `true` (or `1`), prune only\n unused *and* untagged images. When set to `false`\n (or `0`), all unused images are pruned.\n- `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. 
`10m`, `1h30m`) computed relative to the daemon machine’s time.\n- `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels.\n","name":"filters","in":"query"}],"responses":{"200":{"description":"No error","schema":{"type":"object","title":"ImagePruneResponse","properties":{"ImagesDeleted":{"description":"Images that were deleted","type":"array","items":{"$ref":"#/definitions/ImageDeleteResponseItem"}},"SpaceReclaimed":{"description":"Disk space reclaimed in bytes","type":"integer","format":"int64"}}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/search":{"get":{"description":"Search for an image on Docker Hub.","produces":["application/json"],"tags":["Image"],"summary":"Search images","operationId":"ImageSearch","parameters":[{"type":"string","description":"Term to search","name":"term","in":"query","required":true},{"type":"integer","description":"Maximum number of results to return","name":"limit","in":"query"},{"type":"string","description":"A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. 
Available filters:\n\n- `is-automated=(true|false)`\n- `is-official=(true|false)`\n- `stars=` Matches images that has at least 'number' stars.\n","name":"filters","in":"query"}],"responses":{"200":{"description":"No error","schema":{"type":"array","items":{"type":"object","title":"ImageSearchResponseItem","properties":{"description":{"type":"string"},"is_automated":{"type":"boolean"},"is_official":{"type":"boolean"},"name":{"type":"string"},"star_count":{"type":"integer"}}}},"examples":{"application/json":[{"description":"","is_automated":false,"is_official":false,"name":"wma55/u1210sshd","star_count":0},{"description":"","is_automated":false,"is_official":false,"name":"jdswinbank/sshd","star_count":0},{"description":"","is_automated":false,"is_official":false,"name":"vgauthier/sshd","star_count":0}]}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/{name}":{"delete":{"description":"Remove an image, along with any untagged parent images that were\nreferenced by that image.\n\nImages can't be removed if they have descendant images, are being\nused by a running container or are being used by a build.\n","produces":["application/json"],"tags":["Image"],"summary":"Remove an image","operationId":"ImageDelete","parameters":[{"type":"string","description":"Image name or ID","name":"name","in":"path","required":true},{"type":"boolean","default":false,"description":"Remove the image even if it is being used by stopped containers or has other tags","name":"force","in":"query"},{"type":"boolean","default":false,"description":"Do not delete untagged parent images","name":"noprune","in":"query"}],"responses":{"200":{"description":"The image was deleted successfully","schema":{"type":"array","items":{"$ref":"#/definitions/ImageDeleteResponseItem"}},"examples":{"application/json":[{"Untagged":"3e2f21a89f"},{"Deleted":"3e2f21a89f"},{"Deleted":"53b4f83ac9"}]}},"404":{"description":"No such 
image","schema":{"$ref":"#/definitions/ErrorResponse"}},"409":{"description":"Conflict","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/{name}/get":{"get":{"description":"Get a tarball containing all images and metadata for a repository.\n\nIf `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced.\n\n### Image tarball format\n\nAn image tarball contains one directory per image layer (named using its long ID), each containing these files:\n\n- `VERSION`: currently `1.0` - the file format version\n- `json`: detailed layer information, similar to `docker inspect layer_id`\n- `layer.tar`: A tarfile containing the filesystem changes in this layer\n\nThe `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions.\n\nIf the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs.\n\n```json\n{\n \"hello-world\": {\n \"latest\": \"565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1\"\n }\n}\n```\n","produces":["application/x-tar"],"tags":["Image"],"summary":"Export an image","operationId":"ImageGet","parameters":[{"type":"string","description":"Image name or ID","name":"name","in":"path","required":true}],"responses":{"200":{"description":"no error","schema":{"type":"string","format":"binary"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/{name}/history":{"get":{"description":"Return parent layers of an image.","produces":["application/json"],"tags":["Image"],"summary":"Get the history of an 
image","operationId":"ImageHistory","parameters":[{"type":"string","description":"Image name or ID","name":"name","in":"path","required":true}],"responses":{"200":{"description":"List of image layers","schema":{"type":"array","items":{"description":"individual image layer information in response to ImageHistory operation","type":"object","title":"HistoryResponseItem","required":["Id","Created","CreatedBy","Tags","Size","Comment"],"properties":{"Comment":{"type":"string","x-nullable":false},"Created":{"type":"integer","format":"int64","x-nullable":false},"CreatedBy":{"type":"string","x-nullable":false},"Id":{"type":"string","x-nullable":false},"Size":{"type":"integer","format":"int64","x-nullable":false},"Tags":{"type":"array","items":{"type":"string"}}},"x-go-name":"HistoryResponseItem"}},"examples":{"application/json":[{"Comment":"","Created":1398108230,"CreatedBy":"/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /","Id":"3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710","Size":182964289,"Tags":["ubuntu:lucid","ubuntu:10.04"]},{"Comment":"","Created":1398108222,"CreatedBy":"/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/","Id":"6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8","Size":0,"Tags":[]},{"Comment":"Imported from -","Created":1371157430,"CreatedBy":"","Id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","Size":0,"Tags":["scratch12:latest","scratch:latest"]}]}},"404":{"description":"No such image","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/{name}/json":{"get":{"description":"Return low-level information about an image.","produces":["application/json"],"tags":["Image"],"summary":"Inspect an 
image","operationId":"ImageInspect","parameters":[{"type":"string","description":"Image name or id","name":"name","in":"path","required":true}],"responses":{"200":{"description":"No error","schema":{"$ref":"#/definitions/Image"},"examples":{"application/json":{"Architecture":"amd64","Author":"","Comment":"","Config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/bash"],"Domainname":"","Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Hostname":"e611e15f9c9d","Image":"91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c","Labels":{"com.example.license":"GPL","com.example.vendor":"Acme","com.example.version":"1.0"},"MacAddress":"","NetworkDisabled":false,"OnBuild":[],"OpenStdin":false,"PublishService":"","StdinOnce":false,"Tty":false,"User":"","WorkingDir":""},"Container":"cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a","ContainerConfig":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) LABEL com.example.vendor=Acme com.example.license=GPL 
com.example.version=1.0"],"Domainname":"","Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Hostname":"e611e15f9c9d","Image":"91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c","Labels":{"com.example.license":"GPL","com.example.vendor":"Acme","com.example.version":"1.0"},"MacAddress":"","NetworkDisabled":false,"OnBuild":[],"OpenStdin":false,"PublishService":"","StdinOnce":false,"Tty":false,"User":"","WorkingDir":""},"Created":"2015-09-10T08:30:53.26995814Z","DockerVersion":"1.9.0-dev","GraphDriver":{"Data":{},"Name":"aufs"},"Id":"sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c","Os":"linux","Parent":"sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c","RepoDigests":["localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"],"RepoTags":["example:1.0","example:latest","example:stable"],"RootFS":{"Layers":["sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6","sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"],"Type":"layers"},"Size":0,"VirtualSize":188359297}}},"404":{"description":"No such image","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such image: someimage (tag: latest)"}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/{name}/push":{"post":{"description":"Push an image to a registry.\n\nIf you wish to push an image on to a private registry, that image must already have a tag which references the registry. 
For example, `registry.example.com/myimage:latest`.\n\nThe push is cancelled if the HTTP connection is closed.\n","consumes":["application/octet-stream"],"tags":["Image"],"summary":"Push an image","operationId":"ImagePush","parameters":[{"type":"string","description":"Image name or ID.","name":"name","in":"path","required":true},{"type":"string","description":"The tag to associate with the image on the registry.","name":"tag","in":"query"},{"type":"string","description":"A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)","name":"X-Registry-Auth","in":"header","required":true}],"responses":{"200":{"description":"No error"},"404":{"description":"No such image","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/{name}/tag":{"post":{"description":"Tag an image so that it becomes part of a repository.","tags":["Image"],"summary":"Tag an image","operationId":"ImageTag","parameters":[{"type":"string","description":"Image name or ID to tag.","name":"name","in":"path","required":true},{"type":"string","description":"The repository to tag in. 
For example, `someuser/someimage`.","name":"repo","in":"query"},{"type":"string","description":"The name of the new tag.","name":"tag","in":"query"}],"responses":{"201":{"description":"No error"},"400":{"description":"Bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"No such image","schema":{"$ref":"#/definitions/ErrorResponse"}},"409":{"description":"Conflict","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/info":{"get":{"description":"UCP returns a combination of info about the swarm, including:\n\n- Swarm manager status\n\n- Swarm scheduler strategy\n\n- Swarm-manager endpoints that are useful for administrator access when troubleshooting\n\n- Engine proxy endpoints for each node in the swarm that are useful for troubleshooting\n\n- Plugins present on the current manager node\n\n- Engine information for the current manager node","produces":["application/json"],"tags":["System"],"summary":"Get system information","operationId":"SystemInfo","responses":{"200":{"description":"No error","schema":{"$ref":"#/definitions/SystemInfo"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/kubernetesNamespaces":{"get":{"description":"Lists all namespaces for which a user has a grant","produces":["application/json"],"tags":["UCP"],"summary":"Lists all namespaces for which a user has a grant","operationId":"ListUserNamespaces","responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/v1.NamespaceList"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/v1.NamespaceList"}}}}},"/metricsdiscovery":{"get":{"tags":["UCP"],"summary":"/metricsdiscovery","operationId":"restfulNoop","responses":{}}},"/networks":{"get":{"description":"Returns a list of networks. 
For details on the format, see [the network inspect endpoint](#operation/NetworkInspect).\n\nNote that it uses a different, smaller representation of a network than inspecting a single network. For example,\nthe list of containers attached to the network is not propagated in API versions 1.28 and up.\n\n\nNode-specific networks, like those with bridge and third-party drivers, are prefixed with the node name.","produces":["application/json"],"tags":["Network"],"summary":"List networks","operationId":"NetworkList","parameters":[{"type":"string","description":"JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters:\n\n- `driver=` Matches a network's driver.\n- `id=` Matches all or part of a network ID.\n- `label=` or `label==` of a network label.\n- `name=` Matches all or part of a network name.\n- `scope=[\"swarm\"|\"global\"|\"local\"]` Filters networks by scope (`swarm`, `global`, or `local`).\n- `type=[\"custom\"|\"builtin\"]` Filters networks by type. 
The `custom` keyword returns all user-defined networks.\n","name":"filters","in":"query"}],"responses":{"200":{"description":"No error","schema":{"type":"array","items":{"$ref":"#/definitions/Network"}},"examples":{"application/json":[{"Attachable":false,"Created":"2016-10-19T06:21:00.416543526Z","Driver":"bridge","EnableIPv6":false,"IPAM":{"Config":[{"Subnet":"172.17.0.0/16"}],"Driver":"default"},"Id":"f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566","Ingress":false,"Internal":false,"Name":"bridge","Options":{"com.docker.network.bridge.default_bridge":"true","com.docker.network.bridge.enable_icc":"true","com.docker.network.bridge.enable_ip_masquerade":"true","com.docker.network.bridge.host_binding_ipv4":"0.0.0.0","com.docker.network.bridge.name":"docker0","com.docker.network.driver.mtu":"1500"},"Scope":"local"},{"Attachable":false,"Containers":{},"Created":"0001-01-01T00:00:00Z","Driver":"null","EnableIPv6":false,"IPAM":{"Config":[],"Driver":"default"},"Id":"e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794","Ingress":false,"Internal":false,"Name":"none","Options":{},"Scope":"local"},{"Attachable":false,"Containers":{},"Created":"0001-01-01T00:00:00Z","Driver":"host","EnableIPv6":false,"IPAM":{"Config":[],"Driver":"default"},"Id":"13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e","Ingress":false,"Internal":false,"Name":"host","Options":{},"Scope":"local"}]}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/networks/create":{"post":{"description":"If the name is prefixed with the name of a node, the `create` request is sent to the specified node. 
Use the '/' character to separate the node name, like `testnode/testnet`.","consumes":["application/json"],"produces":["application/json"],"tags":["Network"],"summary":"Create a network","operationId":"NetworkCreate","parameters":[{"description":"Network configuration","name":"networkConfig","in":"body","required":true,"schema":{"type":"object","required":["Name"],"properties":{"Attachable":{"description":"Globally scoped network is manually attachable by regular containers from workers in swarm mode.","type":"boolean"},"CheckDuplicate":{"description":"Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions.","type":"boolean"},"Driver":{"description":"Name of the network driver plugin to use.","type":"string","default":"bridge"},"EnableIPv6":{"description":"Enable IPv6 on the network.","type":"boolean"},"IPAM":{"description":"Optional custom IP scheme for the network.","$ref":"#/definitions/IPAM"},"Ingress":{"description":"Ingress network is the network which provides the routing-mesh in swarm mode.","type":"boolean"},"Internal":{"description":"Restrict external access to the network.","type":"boolean"},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"Name":{"description":"The network's name.","type":"string"},"Options":{"description":"Network specific options to be used by the 
drivers.","type":"object","additionalProperties":{"type":"string"}}},"example":{"Attachable":false,"CheckDuplicate":false,"Driver":"bridge","EnableIPv6":true,"IPAM":{"Config":[{"Gateway":"172.20.10.11","IPRange":"172.20.10.0/24","Subnet":"172.20.0.0/16"},{"Gateway":"2001:db8:abcd::1011","Subnet":"2001:db8:abcd::/64"}],"Driver":"default","Options":{"foo":"bar"}},"Ingress":false,"Internal":true,"Labels":{"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"},"Name":"isolated_nw","Options":{"com.docker.network.bridge.default_bridge":"true","com.docker.network.bridge.enable_icc":"true","com.docker.network.bridge.enable_ip_masquerade":"true","com.docker.network.bridge.host_binding_ipv4":"0.0.0.0","com.docker.network.bridge.name":"docker0","com.docker.network.driver.mtu":"1500"}}}}],"responses":{"201":{"description":"No error","schema":{"type":"object","title":"NetworkCreateResponse","properties":{"Id":{"description":"The ID of the created network.","type":"string"},"Warning":{"type":"string"}},"example":{"Id":"22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30","Warning":""}}},"403":{"description":"operation not supported for pre-defined networks","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"plugin not found","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/networks/prune":{"post":{"description":"Not supported on UCP.","produces":["application/json"],"tags":["Network"],"summary":"Delete unused networks","operationId":"NetworkPrune","parameters":[{"type":"string","description":"Filters to process on the prune list, encoded as JSON (a `map[string][]string`).\n\nAvailable filters:\n- `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. 
`10m`, `1h30m`) computed relative to the daemon machine’s time.\n- `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels.\n","name":"filters","in":"query"}],"responses":{"200":{"description":"No error","schema":{"type":"object","title":"NetworkPruneResponse","properties":{"NetworksDeleted":{"description":"Networks that were deleted","type":"array","items":{"type":"string"}}}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/networks/{id}":{"get":{"description":"Node-specific networks, like those with bridge and third-party drivers, are prefixed with the node name.","produces":["application/json"],"tags":["Network"],"summary":"Inspect a network","operationId":"NetworkInspect","parameters":[{"type":"string","description":"Network ID or name","name":"id","in":"path","required":true},{"type":"boolean","default":false,"description":"Detailed inspect output for troubleshooting","name":"verbose","in":"query"},{"type":"string","description":"Filter the network by scope (swarm, global, or local)","name":"scope","in":"query"}],"responses":{"200":{"description":"No error","schema":{"$ref":"#/definitions/Network"}},"404":{"description":"Network not found","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}},"delete":{"tags":["Network"],"summary":"Remove a network","operationId":"NetworkDelete","parameters":[{"type":"string","description":"Network ID or name","name":"id","in":"path","required":true}],"responses":{"204":{"description":"No error"},"403":{"description":"operation not supported for pre-defined networks","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such network","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server 
error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/networks/{id}/connect":{"post":{"consumes":["application/json"],"tags":["Network"],"summary":"Connect a container to a network","operationId":"NetworkConnect","parameters":[{"type":"string","description":"Network ID or name","name":"id","in":"path","required":true},{"name":"container","in":"body","required":true,"schema":{"type":"object","properties":{"Container":{"description":"The ID or name of the container to connect to the network.","type":"string"},"EndpointConfig":{"$ref":"#/definitions/EndpointSettings"}},"example":{"Container":"3613f73ba0e4","EndpointConfig":{"IPAMConfig":{"IPv4Address":"172.24.56.89","IPv6Address":"2001:db8::5689"}}}}}],"responses":{"200":{"description":"No error"},"403":{"description":"Operation not supported for swarm scoped networks","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"Network or container not found","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/networks/{id}/disconnect":{"post":{"consumes":["application/json"],"tags":["Network"],"summary":"Disconnect a container from a network","operationId":"NetworkDisconnect","parameters":[{"type":"string","description":"Network ID or name","name":"id","in":"path","required":true},{"name":"container","in":"body","required":true,"schema":{"type":"object","properties":{"Container":{"description":"The ID or name of the container to disconnect from the network.","type":"string"},"Force":{"description":"Force the container to disconnect from the network.","type":"boolean"}}}}],"responses":{"200":{"description":"No error"},"403":{"description":"Operation not supported for swarm scoped networks","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"Network or container not found","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server 
error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/nodes":{"get":{"description":"UCP augments the `Status.State` based on the status of the UCP components running on the current node.","tags":["Node"],"summary":"List nodes","operationId":"NodeList","parameters":[{"type":"string","description":"Filters to process on the nodes list, encoded as JSON (a `map[string][]string`).\n\nAvailable filters:\n- `id=`\n- `label=`\n- `membership=`(`accepted`|`pending`)`\n- `name=`\n- `role=`(`manager`|`worker`)`\n","name":"filters","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"array","items":{"$ref":"#/definitions/Node"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/nodes/{id}":{"get":{"description":"UCP augments the `Status.State` based on the status of the UCP components running on the current node.","tags":["Node"],"summary":"Inspect a node","operationId":"NodeInspect","parameters":[{"type":"string","description":"The ID or name of the node","name":"id","in":"path","required":true}],"responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/Node"}},"404":{"description":"no such node","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}},"delete":{"description":"If the current node is a manager, to keep the system healthy UCP attempts to unwind swarm components, like the KV store and auth store, from the node.","tags":["Node"],"summary":"Delete a node","operationId":"NodeDelete","parameters":[{"type":"string","description":"The ID or name of the node","name":"id","in":"path","required":true},{"type":"boolean","default":false,"description":"Force remove a node from the 
swarm","name":"force","in":"query"}],"responses":{"200":{"description":"no error"},"404":{"description":"no such node","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/nodes/{id}/update":{"post":{"tags":["Node"],"summary":"Update a node","operationId":"NodeUpdate","parameters":[{"type":"string","description":"The ID of the node","name":"id","in":"path","required":true},{"name":"body","in":"body","schema":{"$ref":"#/definitions/NodeSpec"}},{"type":"integer","format":"int64","description":"The version number of the node object being updated. This is required to avoid conflicting writes.","name":"version","in":"query","required":true}],"responses":{"200":{"description":"no error"},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such node","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins":{"get":{"description":"Returns information about installed plugins.\n\nNOTE: This API endpoint does not work through the UCP API. Admins can use this API directly on each individual node's Docker daemon.","produces":["application/json"],"tags":["Plugin"],"summary":"List plugins","operationId":"PluginList","parameters":[{"type":"string","description":"A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. 
Available filters:\n\n- `capability=`\n- `enable=|`\n","name":"filters","in":"query"}],"responses":{"200":{"description":"No error","schema":{"type":"array","items":{"$ref":"#/definitions/Plugin"}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins/create":{"post":{"description":"\n\nNOTE: This API endpoint does not work through the UCP API. Admins can use this API directly on each individual node's Docker daemon.","consumes":["application/x-tar"],"tags":["Plugin"],"summary":"Create a plugin","operationId":"PluginCreate","parameters":[{"type":"string","description":"The name of the plugin. The `:latest` tag is optional, and is the default if omitted.","name":"name","in":"query","required":true},{"description":"Path to tar containing plugin rootfs and manifest","name":"tarContext","in":"body","schema":{"type":"string","format":"binary"}}],"responses":{"204":{"description":"no error"},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins/pull":{"post":{"description":"Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable).\n\n\nNOTE: This API endpoint does not work through the UCP API. Admins can use this API directly on each individual node's Docker daemon.","produces":["application/json"],"tags":["Plugin"],"summary":"Install a plugin","operationId":"PluginPull","parameters":[{"type":"string","description":"Remote reference for plugin to install.\n\nThe `:latest` tag is optional, and is used as the default if omitted.\n","name":"remote","in":"query","required":true},{"type":"string","description":"Local name for the pulled plugin.\n\nThe `:latest` tag is optional, and is used as the default if omitted.\n","name":"name","in":"query"},{"type":"string","description":"A base64-encoded auth configuration to use when pulling a plugin from a registry. 
[See the authentication section for details.](#section/Authentication)","name":"X-Registry-Auth","in":"header"},{"name":"body","in":"body","schema":{"type":"array","items":{"description":"Describes a permission accepted by the user upon installing the plugin.","type":"object","properties":{"Description":{"type":"string"},"Name":{"type":"string"},"Value":{"type":"array","items":{"type":"string"}}}},"example":[{"Description":"","Name":"network","Value":["host"]},{"Description":"","Name":"mount","Value":["/data"]},{"Description":"","Name":"device","Value":["/dev/cpu_dma_latency"]}]}}],"responses":{"204":{"description":"no error"},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins/{name}":{"delete":{"description":"\n\nNOTE: This API endpoint does not work through the UCP API. Admins can use this API directly on each individual node's Docker daemon.","tags":["Plugin"],"summary":"Remove a plugin","operationId":"PluginDelete","parameters":[{"type":"string","description":"The name of the plugin. The `:latest` tag is optional, and is the default if omitted.","name":"name","in":"path","required":true},{"type":"boolean","default":false,"description":"Disable the plugin before removing. This may result in issues if the plugin is in use by a container.","name":"force","in":"query"}],"responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/Plugin"}},"404":{"description":"plugin is not installed","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins/{name}/disable":{"post":{"description":"\n\nNOTE: This API endpoint does not work through the UCP API. Admins can use this API directly on each individual node's Docker daemon.","tags":["Plugin"],"summary":"Disable a plugin","operationId":"PluginDisable","parameters":[{"type":"string","description":"The name of the plugin. 
The `:latest` tag is optional, and is the default if omitted.","name":"name","in":"path","required":true}],"responses":{"200":{"description":"no error"},"404":{"description":"plugin is not installed","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins/{name}/enable":{"post":{"description":"\n\nNOTE: This API endpoint does not work through the UCP API. Admins can use this API directly on each individual node's Docker daemon.","tags":["Plugin"],"summary":"Enable a plugin","operationId":"PluginEnable","parameters":[{"type":"string","description":"The name of the plugin. The `:latest` tag is optional, and is the default if omitted.","name":"name","in":"path","required":true},{"type":"integer","default":0,"description":"Set the HTTP client timeout (in seconds)","name":"timeout","in":"query"}],"responses":{"200":{"description":"no error"},"404":{"description":"plugin is not installed","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins/{name}/push":{"post":{"description":"Push a plugin to the registry.\n\n\nNOTE: This API endpoint does not work through the UCP API. Admins can use this API directly on each individual node's Docker daemon.","tags":["Plugin"],"summary":"Push a plugin","operationId":"PluginPush","parameters":[{"type":"string","description":"The name of the plugin. The `:latest` tag is optional, and is the default if omitted.","name":"name","in":"path","required":true}],"responses":{"200":{"description":"no error"},"404":{"description":"plugin not installed","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins/{name}/set":{"post":{"description":"\n\nNOTE: This API endpoint does not work through the UCP API. 
Admins can use this API directly on each individual node's Docker daemon.","consumes":["application/json"],"tags":["Plugin"],"summary":"Configure a plugin","operationId":"PluginSet","parameters":[{"type":"string","description":"The name of the plugin. The `:latest` tag is optional, and is the default if omitted.","name":"name","in":"path","required":true},{"name":"body","in":"body","schema":{"type":"array","items":{"type":"string"},"example":["DEBUG=1"]}}],"responses":{"204":{"description":"No error"},"404":{"description":"Plugin not installed","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins/{name}/upgrade":{"post":{"description":"\n\nNOTE: This API endpoint does not work through the UCP API. Admins can use this API directly on each individual node's Docker daemon.","tags":["Plugin"],"summary":"Upgrade a plugin","operationId":"PluginUpgrade","parameters":[{"type":"string","description":"The name of the plugin. The `:latest` tag is optional, and is the default if omitted.","name":"name","in":"path","required":true},{"type":"string","description":"Remote reference to upgrade to.\n\nThe `:latest` tag is optional, and is used as the default if omitted.\n","name":"remote","in":"query","required":true},{"type":"string","description":"A base64-encoded auth configuration to use when pulling a plugin from a registry. 
[See the authentication section for details.](#section/Authentication)","name":"X-Registry-Auth","in":"header"},{"name":"body","in":"body","schema":{"type":"array","items":{"description":"Describes a permission accepted by the user upon installing the plugin.","type":"object","properties":{"Description":{"type":"string"},"Name":{"type":"string"},"Value":{"type":"array","items":{"type":"string"}}}},"example":[{"Description":"","Name":"network","Value":["host"]},{"Description":"","Name":"mount","Value":["/data"]},{"Description":"","Name":"device","Value":["/dev/cpu_dma_latency"]}]}}],"responses":{"204":{"description":"no error"},"404":{"description":"plugin not installed","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/roles":{"get":{"description":"Lists all roles in the system.","produces":["application/json"],"tags":["UCP"],"summary":"Lists all roles in the system.","operationId":"ListRoles","responses":{"200":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/role.Role"}}},"default":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/role.Role"}}}}},"post":{"description":"Creates a new custom role","consumes":["application/json"],"tags":["UCP"],"summary":"Creates a new custom role","operationId":"CreateRole","parameters":[{"type":"role.Role","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/role.Role"}}],"responses":{"201":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/authz.RoleCreateResponse"}}}}}},"/roles/{role}":{"get":{"description":"Retrieves a single role by ID","produces":["application/json"],"tags":["UCP"],"summary":"Retrieves a single role by ID","operationId":"GetRole","parameters":[{"type":"string","default":"","description":"Name of the role to 
get","name":"role","in":"path","required":true}],"responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/role.Role"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/role.Role"}}}},"delete":{"description":"Deletes a role by name","produces":["application/json"],"tags":["UCP"],"summary":"Deletes a role by name","operationId":"DeleteRole","parameters":[{"type":"string","default":"","description":"Name of the role to delete","name":"role","in":"path","required":true}],"responses":{"204":{"description":"Success"}}}},"/secrets":{"get":{"produces":["application/json"],"tags":["Secret"],"summary":"List secrets","operationId":"SecretList","parameters":[{"type":"string","description":"A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters:\n\n- `id=`\n- `label= or label==value`\n- `name=`\n- `names=`\n","name":"filters","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"array","items":{"$ref":"#/definitions/Secret"},"example":[{"CreatedAt":"2017-07-20T13:55:28.678958722Z","ID":"blt1owaxmitz71s9v5zh81zun","Spec":{"Driver":{"Name":"secret-bucket","Options":{"OptionA":"value for driver option A","OptionB":"value for driver option B"}},"Labels":{"some.label":"some.value"},"Name":"mysql-passwd"},"UpdatedAt":"2017-07-20T13:55:28.678958722Z","Version":{"Index":85}},{"CreatedAt":"2016-11-05T01:20:17.327670065Z","ID":"ktnbjxoalbkvbvedmg1urrz8h","Spec":{"Labels":{"foo":"bar"},"Name":"app-dev.crt"},"UpdatedAt":"2016-11-05T01:20:17.327670065Z","Version":{"Index":11}}]}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/secrets/create":{"post":{"consumes":["application/json"],"produces":["application/json"],"tags":["Secret"],"summary":"Create a 
secret","operationId":"SecretCreate","parameters":[{"name":"body","in":"body","schema":{"allOf":[{"$ref":"#/definitions/SecretSpec"},{"type":"object","example":{"Data":"VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==","Driver":{"Name":"secret-bucket","Options":{"OptionA":"value for driver option A","OptionB":"value for driver option B"}},"Labels":{"foo":"bar"},"Name":"app-key.crt"}}]}}],"responses":{"201":{"description":"no error","schema":{"$ref":"#/definitions/IdResponse"}},"409":{"description":"name conflicts with an existing object","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/secrets/{id}":{"get":{"produces":["application/json"],"tags":["Secret"],"summary":"Inspect a secret","operationId":"SecretInspect","parameters":[{"type":"string","description":"ID of the secret","name":"id","in":"path","required":true}],"responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/Secret"},"examples":{"application/json":{"CreatedAt":"2016-11-05T01:20:17.327670065Z","ID":"ktnbjxoalbkvbvedmg1urrz8h","Spec":{"Driver":{"Name":"secret-bucket","Options":{"OptionA":"value for driver option A","OptionB":"value for driver option B"}},"Labels":{"foo":"bar"},"Name":"app-dev.crt"},"UpdatedAt":"2016-11-05T01:20:17.327670065Z","Version":{"Index":11}}}},"404":{"description":"secret not found","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}},"delete":{"produces":["application/json"],"tags":["Secret"],"summary":"Delete a secret","operationId":"SecretDelete","parameters":[{"type":"string","description":"ID of the secret","name":"id","in":"path","required":true}],"responses":{"204":{"description":"no 
error"},"404":{"description":"secret not found","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/secrets/{id}/update":{"post":{"tags":["Secret"],"summary":"Update a Secret","operationId":"SecretUpdate","parameters":[{"type":"string","description":"The ID or name of the secret","name":"id","in":"path","required":true},{"description":"The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values.","name":"body","in":"body","schema":{"$ref":"#/definitions/SecretSpec"}},{"type":"integer","format":"int64","description":"The version number of the secret object being updated. This is required to avoid conflicting writes.","name":"version","in":"query","required":true}],"responses":{"200":{"description":"no error"},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such secret","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/services":{"get":{"tags":["Service"],"summary":"List services","operationId":"ServiceList","parameters":[{"type":"string","description":"A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. 
Available filters:\n\n- `id=`\n- `label=`\n- `mode=[\"replicated\"|\"global\"]`\n- `name=`\n","name":"filters","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"array","items":{"$ref":"#/definitions/Service"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/services/create":{"post":{"consumes":["application/json"],"produces":["application/json"],"tags":["Service"],"summary":"Create a service","operationId":"ServiceCreate","parameters":[{"name":"body","in":"body","required":true,"schema":{"allOf":[{"$ref":"#/definitions/ServiceSpec"},{"type":"object","example":{"EndpointSpec":{"Ports":[{"Protocol":"tcp","PublishedPort":8080,"TargetPort":80}]},"Labels":{"foo":"bar"},"Mode":{"Replicated":{"Replicas":4}},"Name":"web","RollbackConfig":{"Delay":1000000000,"FailureAction":"pause","MaxFailureRatio":0.15,"Monitor":15000000000,"Parallelism":1},"TaskTemplate":{"ContainerSpec":{"DNSConfig":{"Nameservers":["8.8.8.8"],"Options":["timeout:3"],"Search":["example.org"]},"Hosts":["10.10.10.10 host1","ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 
host2"],"Image":"nginx:alpine","Mounts":[{"ReadOnly":true,"Source":"web-data","Target":"/usr/share/nginx/html","Type":"volume","VolumeOptions":{"DriverConfig":{},"Labels":{"com.example.something":"something-value"}}}],"Secrets":[{"File":{"GID":"33","Mode":384,"Name":"www.example.org.key","UID":"33"},"SecretID":"fpjqlhnwb19zds35k8wn80lq9","SecretName":"example_org_domain_key"}],"User":"33"},"LogDriver":{"Name":"json-file","Options":{"max-file":"3","max-size":"10M"}},"Placement":{},"Resources":{"Limits":{"MemoryBytes":104857600},"Reservations":{}},"RestartPolicy":{"Condition":"on-failure","Delay":10000000000,"MaxAttempts":10}},"UpdateConfig":{"Delay":1000000000,"FailureAction":"pause","MaxFailureRatio":0.15,"Monitor":15000000000,"Parallelism":2}}}]}},{"type":"string","description":"A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)","name":"X-Registry-Auth","in":"header"}],"responses":{"201":{"description":"no error","schema":{"type":"object","title":"ServiceCreateResponse","properties":{"ID":{"description":"The ID of the created service.","type":"string"},"Warning":{"description":"Optional warning message","type":"string"}},"example":{"ID":"ak7w3gjqoa3kuz8xcpnyy0pvl","Warning":"unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"}}},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"403":{"description":"network is not eligible for services","schema":{"$ref":"#/definitions/ErrorResponse"}},"409":{"description":"name conflicts with an existing service","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/services/{id}":{"get":{"tags":["Service"],"summary":"Inspect a 
service","operationId":"ServiceInspect","parameters":[{"type":"string","description":"ID or name of service.","name":"id","in":"path","required":true},{"type":"boolean","default":false,"description":"Fill empty fields with default values.","name":"insertDefaults","in":"query"}],"responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/Service"}},"404":{"description":"no such service","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}},"delete":{"tags":["Service"],"summary":"Delete a service","operationId":"ServiceDelete","parameters":[{"type":"string","description":"ID or name of service.","name":"id","in":"path","required":true}],"responses":{"200":{"description":"no error"},"404":{"description":"no such service","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/services/{id}/logs":{"get":{"description":"Get `stdout` and `stderr` logs from a service.\n\n**Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers.\n","produces":["application/vnd.docker.raw-stream","application/json"],"tags":["Service"],"summary":"Get service logs","operationId":"ServiceLogs","parameters":[{"type":"string","description":"ID or name of the service","name":"id","in":"path","required":true},{"type":"boolean","default":false,"description":"Show service context and extra details provided to logs.","name":"details","in":"query"},{"type":"boolean","default":false,"description":"Return the logs as a stream.\n\nThis will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. 
For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).\n","name":"follow","in":"query"},{"type":"boolean","default":false,"description":"Return logs from `stdout`","name":"stdout","in":"query"},{"type":"boolean","default":false,"description":"Return logs from `stderr`","name":"stderr","in":"query"},{"type":"integer","default":0,"description":"Only return logs since this time, as a UNIX timestamp","name":"since","in":"query"},{"type":"boolean","default":false,"description":"Add timestamps to every log line","name":"timestamps","in":"query"},{"type":"string","default":"all","description":"Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines.","name":"tail","in":"query"}],"responses":{"101":{"description":"logs returned as a stream","schema":{"type":"string","format":"binary"}},"200":{"description":"logs returned as a string in response body","schema":{"type":"string"}},"404":{"description":"no such service","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such service: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/services/{id}/update":{"post":{"consumes":["application/json"],"produces":["application/json"],"tags":["Service"],"summary":"Update a service","operationId":"ServiceUpdate","parameters":[{"type":"string","description":"ID or name of 
service.","name":"id","in":"path","required":true},{"name":"body","in":"body","required":true,"schema":{"allOf":[{"$ref":"#/definitions/ServiceSpec"},{"type":"object","example":{"EndpointSpec":{"Mode":"vip"},"Mode":{"Replicated":{"Replicas":1}},"Name":"top","RollbackConfig":{"Delay":1000000000,"FailureAction":"pause","MaxFailureRatio":0.15,"Monitor":15000000000,"Parallelism":1},"TaskTemplate":{"ContainerSpec":{"Args":["top"],"Image":"busybox"},"ForceUpdate":0,"Placement":{},"Resources":{"Limits":{},"Reservations":{}},"RestartPolicy":{"Condition":"any","MaxAttempts":0}},"UpdateConfig":{"Delay":1000000000,"FailureAction":"pause","MaxFailureRatio":0.15,"Monitor":15000000000,"Parallelism":2}}}]}},{"type":"integer","description":"The version number of the service object being updated. This is required to avoid conflicting writes.","name":"version","in":"query","required":true},{"type":"string","default":"spec","description":"If the X-Registry-Auth header is not specified, this parameter indicates where to find registry authorization credentials. The valid values are `spec` and `previous-spec`.","name":"registryAuthFrom","in":"query"},{"type":"string","description":"Set to this parameter to `previous` to cause a server-side rollback to the previous service spec. The supplied spec will be ignored in this case.","name":"rollback","in":"query"},{"type":"string","description":"A base64-encoded auth configuration for pulling from private registries. 
[See the authentication section for details.](#section/Authentication)","name":"X-Registry-Auth","in":"header"}],"responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/ServiceUpdateResponse"}},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such service","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/swarm":{"get":{"tags":["Swarm"],"summary":"Inspect swarm","operationId":"SwarmInspect","responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/Swarm"}},"404":{"description":"no such swarm","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/swarm/init":{"post":{"produces":["application/json","text/plain"],"tags":["Swarm"],"summary":"Initialize a new swarm","operationId":"SwarmInit","parameters":[{"name":"body","in":"body","required":true,"schema":{"type":"object","properties":{"AdvertiseAddr":{"description":"Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible.","type":"string"},"DataPathAddr":{"description":"Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`,\nor an interface, like `eth0`. 
If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr`\nis used.\n\nThe `DataPathAddr` specifies the address that global scope network drivers will publish towards other\nnodes in order to reach the containers running on this node. Using this parameter it is possible to\nseparate the container data traffic from the management traffic of the cluster.\n","type":"string"},"DefaultAddrPool":{"description":"Default Address Pool specifies default subnet pools for global scope networks.\n","type":"array","items":{"type":"string","example":["10.10.0.0/16","20.20.0.0/16"]}},"ForceNewCluster":{"description":"Force creation of a new swarm.","type":"boolean"},"ListenAddr":{"description":"Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used.","type":"string"},"Spec":{"$ref":"#/definitions/SwarmSpec"},"SubnetSize":{"description":"SubnetSize specifies the subnet size of the networks created from the default subnet pool\n","type":"integer","format":"uint32"}},"example":{"AdvertiseAddr":"192.168.1.1:2377","DefaultAddrPool":["10.10.0.0/8","20.20.0.0/8"],"ForceNewCluster":false,"ListenAddr":"0.0.0.0:2377","Spec":{"CAConfig":{},"Dispatcher":{},"EncryptionConfig":{"AutoLockManagers":false},"Orchestration":{},"Raft":{}},"SubnetSize":24}}}],"responses":{"200":{"description":"no error","schema":{"description":"The node ID","type":"string","example":"7v2t30z9blmxuhnyo6s4cpenp"}},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is already part of a 
swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/swarm/join":{"post":{"tags":["Swarm"],"summary":"Join an existing swarm","operationId":"SwarmJoin","parameters":[{"name":"body","in":"body","required":true,"schema":{"type":"object","properties":{"AdvertiseAddr":{"description":"Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible.","type":"string"},"DataPathAddr":{"description":"Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`,\nor an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr`\nis used.\n\nThe `DataPathAddr` specifies the address that global scope network drivers will publish towards other\nnodes in order to reach the containers running on this node. 
Using this parameter it is possible to\nseparate the container data traffic from the management traffic of the cluster.\n","type":"string"},"JoinToken":{"description":"Secret token for joining this swarm.","type":"string"},"ListenAddr":{"description":"Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP).","type":"string"},"RemoteAddrs":{"description":"Addresses of manager nodes already participating in the swarm.","type":"string"}},"example":{"AdvertiseAddr":"192.168.1.1:2377","JoinToken":"SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2","ListenAddr":"0.0.0.0:2377","RemoteAddrs":["node1:2377"]}}}],"responses":{"200":{"description":"no error"},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is already part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/swarm/leave":{"post":{"tags":["Swarm"],"summary":"Leave a swarm","operationId":"SwarmLeave","parameters":[{"type":"boolean","default":false,"description":"Force leave swarm, even if this is the last manager or that it will break the cluster.","name":"force","in":"query"}],"responses":{"200":{"description":"no error"},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/swarm/update":{"post":{"tags":["Swarm"],"summary":"Update a swarm","operationId":"SwarmUpdate","parameters":[{"name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/SwarmSpec"}},{"type":"integer","format":"int64","description":"The version number of the swarm object being updated. 
This is required to avoid conflicting writes.","name":"version","in":"query","required":true},{"type":"boolean","default":false,"description":"Rotate the worker join token.","name":"rotateWorkerToken","in":"query"},{"type":"boolean","default":false,"description":"Rotate the manager join token.","name":"rotateManagerToken","in":"query"},{"type":"boolean","default":false,"description":"Rotate the manager unlock key.","name":"rotateManagerUnlockKey","in":"query"}],"responses":{"200":{"description":"no error"},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/tasks":{"get":{"produces":["application/json"],"tags":["Task"],"summary":"List tasks","operationId":"TaskList","parameters":[{"type":"string","description":"A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
Available filters:\n\n- `desired-state=(running | shutdown | accepted)`\n- `id=`\n- `label=key` or `label=\"key=value\"`\n- `name=`\n- `node=`\n- `service=`\n","name":"filters","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"array","items":{"$ref":"#/definitions/Task"},"example":[{"CreatedAt":"2016-06-07T21:07:31.171892745Z","DesiredState":"running","ID":"0kzzo1i0y4jz6027t0k7aezc7","NetworksAttachments":[{"Addresses":["10.255.0.10/16"],"Network":{"CreatedAt":"2016-06-07T20:31:11.912919752Z","DriverState":{"Name":"overlay","Options":{"com.docker.network.driver.overlay.vxlanid_list":"256"}},"ID":"4qvuz4ko70xaltuqbt8956gd1","IPAMOptions":{"Configs":[{"Gateway":"10.255.0.1","Subnet":"10.255.0.0/16"}],"Driver":{"Name":"default"}},"Spec":{"DriverConfiguration":{},"IPAMOptions":{"Configs":[{"Gateway":"10.255.0.1","Subnet":"10.255.0.0/16"}],"Driver":{}},"Labels":{"com.docker.swarm.internal":"true"},"Name":"ingress"},"UpdatedAt":"2016-06-07T21:07:29.955277358Z","Version":{"Index":18}}}],"NodeID":"60gvrl6tm78dmak4yl7srz94v","ServiceID":"9mnpnzenvg8p8tdbtq4wvbkcz","Slot":1,"Spec":{"ContainerSpec":{"Image":"redis"},"Placement":{},"Resources":{"Limits":{},"Reservations":{}},"RestartPolicy":{"Condition":"any","MaxAttempts":0}},"Status":{"ContainerStatus":{"ContainerID":"e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035","PID":677},"Message":"started","State":"running","Timestamp":"2016-06-07T21:07:31.290032978Z"},"UpdatedAt":"2016-06-07T21:07:31.376370513Z","Version":{"Index":71}},{"CreatedAt":"2016-06-07T21:07:30.019104782Z","DesiredState":"shutdown","ID":"1yljwbmlr8er2waf8orvqpwms","Name":"hopeful_cori","NetworksAttachments":[{"Addresses":["10.255.0.5/16"],"Network":{"CreatedAt":"2016-06-07T20:31:11.912919752Z","DriverState":{"Name":"overlay","Options":{"com.docker.network.driver.overlay.vxlanid_list":"256"}},"ID":"4qvuz4ko70xaltuqbt8956gd1","IPAMOptions":{"Configs":[{"Gateway":"10.255.0.1","Subnet":"10.255.0.0/16"}],"Driver":{"Name"
:"default"}},"Spec":{"DriverConfiguration":{},"IPAMOptions":{"Configs":[{"Gateway":"10.255.0.1","Subnet":"10.255.0.0/16"}],"Driver":{}},"Labels":{"com.docker.swarm.internal":"true"},"Name":"ingress"},"UpdatedAt":"2016-06-07T21:07:29.955277358Z","Version":{"Index":18}}}],"NodeID":"60gvrl6tm78dmak4yl7srz94v","ServiceID":"9mnpnzenvg8p8tdbtq4wvbkcz","Slot":1,"Spec":{"ContainerSpec":{"Image":"redis"},"Placement":{},"Resources":{"Limits":{},"Reservations":{}},"RestartPolicy":{"Condition":"any","MaxAttempts":0}},"Status":{"ContainerStatus":{"ContainerID":"1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213"},"Message":"shutdown","State":"shutdown","Timestamp":"2016-06-07T21:07:30.202183143Z"},"UpdatedAt":"2016-06-07T21:07:30.231958098Z","Version":{"Index":30}}]}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/tasks/{id}":{"get":{"produces":["application/json"],"tags":["Task"],"summary":"Inspect a task","operationId":"TaskInspect","parameters":[{"type":"string","description":"ID of the task","name":"id","in":"path","required":true}],"responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/Task"}},"404":{"description":"no such task","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/tasks/{id}/logs":{"get":{"description":"Get `stdout` and `stderr` logs from a task.\n\n**Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers.\n","produces":["application/vnd.docker.raw-stream","application/json"],"tags":["UCP"],"summary":"Get task logs","operationId":"TaskLogs","parameters":[{"type":"string","description":"ID of the 
task","name":"id","in":"path","required":true},{"type":"boolean","default":false,"description":"Show task context and extra details provided to logs.","name":"details","in":"query"},{"type":"boolean","default":false,"description":"Return the logs as a stream.\n\nThis will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).\n","name":"follow","in":"query"},{"type":"boolean","default":false,"description":"Return logs from `stdout`","name":"stdout","in":"query"},{"type":"boolean","default":false,"description":"Return logs from `stderr`","name":"stderr","in":"query"},{"type":"integer","default":0,"description":"Only return logs since this time, as a UNIX timestamp","name":"since","in":"query"},{"type":"boolean","default":false,"description":"Add timestamps to every log line","name":"timestamps","in":"query"},{"type":"string","default":"all","description":"Only return this number of log lines from the end of the logs. 
Specify as an integer or `all` to output all log lines.","name":"tail","in":"query"}],"responses":{"101":{"description":"logs returned as a stream","schema":{"type":"string","format":"binary"}},"200":{"description":"logs returned as a string in response body","schema":{"type":"string"}},"404":{"description":"no such task","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such task: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/totalRole":{"get":{"description":"Returns a role with all operations that a user can perform against at least one collection in the system.","produces":["application/json"],"tags":["UCP"],"summary":"Returns a role with all operations that a user can perform against at least one collection in the system.","operationId":"TotalRole","responses":{"200":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/role.Role"}}},"default":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/role.Role"}}}}}},"/version":{"get":{"description":"Returns the version of Docker that is running and various information about the system that Docker is running on.","produces":["application/json"],"tags":["System"],"summary":"Get version","operationId":"SystemVersion","responses":{"200":{"description":"no 
error","schema":{"type":"object","title":"SystemVersionResponse","properties":{"ApiVersion":{"type":"string"},"Arch":{"type":"string"},"BuildTime":{"type":"string"},"Components":{"type":"array","items":{"type":"object","required":["Name","Version"],"properties":{"Details":{"type":"object","x-nullable":true},"Name":{"type":"string"},"Version":{"type":"string","x-nullable":false}},"x-go-name":"ComponentVersion"}},"Experimental":{"type":"boolean"},"GitCommit":{"type":"string"},"GoVersion":{"type":"string"},"KernelVersion":{"type":"string"},"MinAPIVersion":{"type":"string"},"Os":{"type":"string"},"Platform":{"type":"object","required":["Name"],"properties":{"Name":{"type":"string"}}},"Version":{"type":"string"}}},"examples":{"application/json":{"ApiVersion":"1.27","Arch":"amd64","BuildTime":"2016-06-14T07:09:13.444803460+00:00","Experimental":true,"GitCommit":"deadbee","GoVersion":"go1.7.5","KernelVersion":"3.19.0-23-generic","MinAPIVersion":"1.12","Os":"linux","Version":"17.04.0"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/volumes":{"get":{"description":"If the name is prefixed with the name of a node, the request is sent to the specified node. Use the '/' character to distinguish the node name, like `testnode/testvolume`.","produces":["application/json"],"tags":["Volume"],"summary":"List volumes","operationId":"VolumeList","parameters":[{"type":"string","format":"json","description":"JSON encoded value of the filters (a `map[string][]string`) to\nprocess on the volumes list. Available filters:\n\n- `dangling=` When set to `true` (or `1`), returns all\n volumes that are not in use by a container. 
When set to `false`\n (or `0`), only volumes that are in use by one or more\n containers are returned.\n- `driver=` Matches volumes based on their driver.\n- `label=` or `label=:` Matches volumes based on\n the presence of a `label` alone or a `label` and a value.\n- `name=` Matches all or part of a volume name.\n","name":"filters","in":"query"}],"responses":{"200":{"description":"Summary volume data that matches the query","schema":{"description":"Volume list response","type":"object","title":"VolumeListResponse","required":["Volumes","Warnings"],"properties":{"Volumes":{"description":"List of volumes","type":"array","items":{"$ref":"#/definitions/Volume"},"x-nullable":false},"Warnings":{"description":"Warnings that occurred when fetching the list of volumes","type":"array","items":{"type":"string"},"x-nullable":false}}},"examples":{"application/json":{"Volumes":[{"CreatedAt":"2017-07-19T12:00:26Z","Driver":"local","Labels":{"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"},"Mountpoint":"/var/lib/docker/volumes/tardis","Name":"tardis","Options":{"device":"tmpfs","o":"size=100m,uid=1000","type":"tmpfs"},"Scope":"local"}],"Warnings":[]}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/volumes/create":{"post":{"consumes":["application/json"],"produces":["application/json"],"tags":["Volume"],"summary":"Create a volume","operationId":"VolumeCreate","parameters":[{"description":"Volume configuration","name":"volumeConfig","in":"body","required":true,"schema":{"description":"Volume configuration","type":"object","title":"VolumeConfig","properties":{"Driver":{"description":"Name of the volume driver to use.","type":"string","default":"local","x-nullable":false},"DriverOpts":{"description":"A mapping of driver options and values. 
These options are passed directly to the driver and are driver specific.","type":"object","additionalProperties":{"type":"string"}},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"Name":{"description":"The new volume's name. If not specified, Docker generates a name.","type":"string","x-nullable":false}},"example":{"Driver":"custom","Labels":{"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"},"Name":"tardis"}}}],"responses":{"201":{"description":"The volume was created successfully","schema":{"$ref":"#/definitions/Volume"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/volumes/prune":{"post":{"produces":["application/json"],"tags":["Volume"],"summary":"Delete unused volumes","operationId":"VolumePrune","parameters":[{"type":"string","description":"Filters to process on the prune list, encoded as JSON (a `map[string][]string`).\n\nAvailable filters:\n- `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels.\n","name":"filters","in":"query"}],"responses":{"200":{"description":"No error","schema":{"type":"object","title":"VolumePruneResponse","properties":{"SpaceReclaimed":{"description":"Disk space reclaimed in bytes","type":"integer","format":"int64"},"VolumesDeleted":{"description":"Volumes that were deleted","type":"array","items":{"type":"string"}}}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/volumes/{name}":{"get":{"description":"If the name is prefixed with the name of a node, the request is sent to the specified node. 
Use the '/' character to distinguish the node name, like `testnode/testvolume`.","produces":["application/json"],"tags":["Volume"],"summary":"Inspect a volume","operationId":"VolumeInspect","parameters":[{"type":"string","description":"Volume name or ID","name":"name","in":"path","required":true}],"responses":{"200":{"description":"No error","schema":{"$ref":"#/definitions/Volume"}},"404":{"description":"No such volume","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}},"delete":{"description":"Instruct the driver to remove the volume.","tags":["Volume"],"summary":"Remove a volume","operationId":"VolumeDelete","parameters":[{"type":"string","description":"Volume name or ID","name":"name","in":"path","required":true},{"type":"boolean","default":false,"description":"Force the removal of the volume","name":"force","in":"query"}],"responses":{"204":{"description":"The volume was removed"},"404":{"description":"No such volume or volume driver","schema":{"$ref":"#/definitions/ErrorResponse"}},"409":{"description":"Volume is in use and cannot be removed","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}}},"definitions":{"Address":{"description":"Address represents an IPv4 or IPv6 IP address.","type":"object","properties":{"Addr":{"description":"IP address.","type":"string"},"PrefixLen":{"description":"Mask length of the IP 
address.","type":"integer"}}},"AuthConfig":{"type":"object","properties":{"email":{"type":"string"},"password":{"type":"string"},"serveraddress":{"type":"string"},"username":{"type":"string"}},"example":{"password":"xxxx","serveraddress":"https://index.docker.io/v1/","username":"hannibal"}},"BuildInfo":{"type":"object","properties":{"aux":{"$ref":"#/definitions/ImageID"},"error":{"type":"string"},"errorDetail":{"$ref":"#/definitions/ErrorDetail"},"id":{"type":"string"},"progress":{"type":"string"},"progressDetail":{"$ref":"#/definitions/ProgressDetail"},"status":{"type":"string"},"stream":{"type":"string"}}},"ClusterInfo":{"description":"ClusterInfo represents information about the swarm as is returned by the\n\"/info\" endpoint. Join-tokens are not included.\n","type":"object","properties":{"CreatedAt":{"description":"Date and time at which the swarm was initialised in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n","type":"string","format":"dateTime","example":"2016-08-18T10:44:24.496525531Z"},"ID":{"description":"The ID of the swarm.","type":"string","example":"abajmipo7b4xz5ip2nrla6b11"},"RootRotationInProgress":{"description":"Whether there is currently a root CA rotation in progress for the swarm","type":"boolean","example":false},"Spec":{"$ref":"#/definitions/SwarmSpec"},"TLSInfo":{"$ref":"#/definitions/TLSInfo"},"UpdatedAt":{"description":"Date and time at which the swarm was last updated in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n","type":"string","format":"dateTime","example":"2017-08-09T07:09:37.632105588Z"},"Version":{"$ref":"#/definitions/ObjectVersion"}},"x-nullable":true},"Commit":{"description":"Commit holds the Git-commit (SHA1) that a binary was built from, as\nreported in the version-string of external tools, such as `containerd`,\nor `runC`.\n","type":"object","properties":{"Expected":{"description":"Commit ID of external tool expected by dockerd as set at build 
time.\n","type":"string","example":"2d41c047c83e09a6d61d464906feb2a2f3c52aa4"},"ID":{"description":"Actual commit ID of external tool.","type":"string","example":"cfb82a876ecc11b5ca0977d1733adbe58599088a"}}},"Config":{"type":"object","properties":{"CreatedAt":{"type":"string","format":"dateTime"},"ID":{"type":"string"},"Spec":{"$ref":"#/definitions/ConfigSpec"},"UpdatedAt":{"type":"string","format":"dateTime"},"Version":{"$ref":"#/definitions/ObjectVersion"}}},"ConfigSpec":{"type":"object","properties":{"Data":{"description":"Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2))\nconfig data.\n","type":"string"},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"Name":{"description":"User-defined name of the config.","type":"string"},"Templating":{"description":"Templating driver, if applicable\n\nTemplating controls whether and how to evaluate the config payload as\na template. If no driver is set, no templating is used.\n","$ref":"#/definitions/Driver"}}},"ContainerConfig":{"description":"Configuration for a container that is portable between hosts","type":"object","properties":{"ArgsEscaped":{"description":"Command is already escaped (Windows only)","type":"boolean"},"AttachStderr":{"description":"Whether to attach to `stderr`.","type":"boolean","default":true},"AttachStdin":{"description":"Whether to attach to `stdin`.","type":"boolean","default":false},"AttachStdout":{"description":"Whether to attach to `stdout`.","type":"boolean","default":true},"Cmd":{"description":"Command to run specified as a string or an array of strings.","type":"array","items":{"type":"string"}},"Domainname":{"description":"The domain name to use for the container.","type":"string"},"Entrypoint":{"description":"The entry point for the container as a string or an array of strings.\n\nIf the array consists of exactly one empty string (`[\"\"]`) then the entry point is reset to system 
default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).\n","type":"array","items":{"type":"string"}},"Env":{"description":"A list of environment variables to set inside the container in the form `[\"VAR=value\", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value.\n","type":"array","items":{"type":"string"}},"ExposedPorts":{"description":"An object mapping ports to an empty object in the form:\n\n`{\"/\": {}}`\n","type":"object","additionalProperties":{"type":"object","default":{},"enum":[{}]}},"Healthcheck":{"$ref":"#/definitions/HealthConfig"},"Hostname":{"description":"The hostname to use for the container, as a valid RFC 1123 hostname.","type":"string"},"Image":{"description":"The name of the image to use when creating the container","type":"string"},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"MacAddress":{"description":"MAC address of the container.","type":"string"},"NetworkDisabled":{"description":"Disable networking for the container.","type":"boolean"},"OnBuild":{"description":"`ONBUILD` metadata that were defined in the image's `Dockerfile`.","type":"array","items":{"type":"string"}},"OpenStdin":{"description":"Open `stdin`","type":"boolean","default":false},"Shell":{"description":"Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell.","type":"array","items":{"type":"string"}},"StdinOnce":{"description":"Close `stdin` after one attached client disconnects","type":"boolean","default":false},"StopSignal":{"description":"Signal to stop a container as a string or unsigned integer.","type":"string","default":"SIGTERM"},"StopTimeout":{"description":"Timeout to stop a container in seconds.","type":"integer","default":10},"Tty":{"description":"Attach standard streams to a TTY, including `stdin` if it is not closed.","type":"boolean","default":false},"User":{"description":"The user that 
commands are run as inside the container.","type":"string"},"Volumes":{"description":"An object mapping mount point paths inside the container to empty objects.","type":"object","additionalProperties":{"type":"object","default":{},"enum":[{}]}},"WorkingDir":{"description":"The working directory for commands to run in.","type":"string"}}},"ContainerSummary":{"type":"array","items":{"type":"object","properties":{"Command":{"description":"Command to run when starting the container","type":"string"},"Created":{"description":"When the container was created","type":"integer","format":"int64"},"HostConfig":{"type":"object","properties":{"NetworkMode":{"type":"string"}}},"Id":{"description":"The ID of this container","type":"string","x-go-name":"ID"},"Image":{"description":"The name of the image used when creating this container","type":"string"},"ImageID":{"description":"The ID of the image that this container was created from","type":"string"},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"Mounts":{"type":"array","items":{"$ref":"#/definitions/Mount"}},"Names":{"description":"The names that this container has been given","type":"array","items":{"type":"string"}},"NetworkSettings":{"description":"A summary of the container's network settings","type":"object","properties":{"Networks":{"type":"object","additionalProperties":{"$ref":"#/definitions/EndpointSettings"}}}},"Ports":{"description":"The ports exposed by this container","type":"array","items":{"$ref":"#/definitions/Port"}},"SizeRootFs":{"description":"The total size of all the files in this container","type":"integer","format":"int64"},"SizeRw":{"description":"The size of files that have been created or changed by this container","type":"integer","format":"int64"},"State":{"description":"The state of this container (e.g. `Exited`)","type":"string"},"Status":{"description":"Additional human-readable status of this container (e.g. 
`Exit 0`)","type":"string"}}}},"CreateImageInfo":{"type":"object","properties":{"error":{"type":"string"},"id":{"type":"string"},"progress":{"type":"string"},"progressDetail":{"$ref":"#/definitions/ProgressDetail"},"status":{"type":"string"}}},"DeviceMapping":{"description":"A device mapping between the host and container","type":"object","properties":{"CgroupPermissions":{"type":"string"},"PathInContainer":{"type":"string"},"PathOnHost":{"type":"string"}},"example":{"CgroupPermissions":"mrw","PathInContainer":"/dev/deviceName","PathOnHost":"/dev/deviceName"}},"Driver":{"description":"Driver represents a driver (network, logging, secrets).","type":"object","required":["Name"],"properties":{"Name":{"description":"Name of the driver.","type":"string","x-nullable":false,"example":"some-driver"},"Options":{"description":"Key/value map of driver-specific options.","type":"object","additionalProperties":{"type":"string"},"x-nullable":false,"example":{"OptionA":"value for driver-specific option A","OptionB":"value for driver-specific option B"}}}},"EndpointIPAMConfig":{"description":"EndpointIPAMConfig represents an endpoint's IPAM configuration.\n","type":"object","properties":{"IPv4Address":{"type":"string","example":"172.20.30.33"},"IPv6Address":{"type":"string","example":"2001:db8:abcd::3033"},"LinkLocalIPs":{"type":"array","items":{"type":"string"},"example":["169.254.34.68","fe80::3468"]}},"x-nullable":true},"EndpointPortConfig":{"type":"object","properties":{"Name":{"type":"string"},"Protocol":{"type":"string","enum":["tcp","udp","sctp"]},"PublishMode":{"description":"The mode in which port is published.\n\n


    \n\n- \"ingress\" makes the target port accessible on every node,\n regardless of whether there is a task for the service running on\n that node or not.\n- \"host\" bypasses the routing mesh and publishes the port directly on\n the swarm node where that service is running.\n","type":"string","default":"ingress","enum":["ingress","host"],"example":"ingress"},"PublishedPort":{"description":"The port on the swarm hosts.","type":"integer"},"TargetPort":{"description":"The port inside the container.","type":"integer"}}},"EndpointSettings":{"description":"Configuration for a network endpoint.","type":"object","properties":{"Aliases":{"type":"array","items":{"type":"string"},"example":["server_x","server_y"]},"DriverOpts":{"description":"DriverOpts is a mapping of driver options and values. These options\nare passed directly to the driver and are driver specific.\n","type":"object","additionalProperties":{"type":"string"},"x-nullable":true,"example":{"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"}},"EndpointID":{"description":"Unique ID for the service endpoint in a Sandbox.\n","type":"string","example":"b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"},"Gateway":{"description":"Gateway address for this network.\n","type":"string","example":"172.17.0.1"},"GlobalIPv6Address":{"description":"Global IPv6 address.\n","type":"string","example":"2001:db8::5689"},"GlobalIPv6PrefixLen":{"description":"Mask length of the global IPv6 address.\n","type":"integer","format":"int64","example":64},"IPAMConfig":{"$ref":"#/definitions/EndpointIPAMConfig"},"IPAddress":{"description":"IPv4 address.\n","type":"string","example":"172.17.0.4"},"IPPrefixLen":{"description":"Mask length of the IPv4 address.\n","type":"integer","example":16},"IPv6Gateway":{"description":"IPv6 gateway 
address.\n","type":"string","example":"2001:db8:2::100"},"Links":{"type":"array","items":{"type":"string"},"example":["container_1","container_2"]},"MacAddress":{"description":"MAC address for the endpoint on this network.\n","type":"string","example":"02:42:ac:11:00:04"},"NetworkID":{"description":"Unique ID of the network.\n","type":"string","example":"08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a"}}},"EndpointSpec":{"description":"Properties that can be configured to access and load balance a service.","type":"object","properties":{"Mode":{"description":"The mode of resolution to use for internal load balancing between tasks.","type":"string","default":"vip","enum":["vip","dnsrr"]},"Ports":{"description":"List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used.","type":"array","items":{"$ref":"#/definitions/EndpointPortConfig"}}}},"EngineDescription":{"description":"EngineDescription provides information about an 
engine.","type":"object","properties":{"EngineVersion":{"type":"string","example":"17.06.0"},"Labels":{"type":"object","additionalProperties":{"type":"string"},"example":{"foo":"bar"}},"Plugins":{"type":"array","items":{"type":"object","properties":{"Name":{"type":"string"},"Type":{"type":"string"}}},"example":[{"Name":"awslogs","Type":"Log"},{"Name":"fluentd","Type":"Log"},{"Name":"gcplogs","Type":"Log"},{"Name":"gelf","Type":"Log"},{"Name":"journald","Type":"Log"},{"Name":"json-file","Type":"Log"},{"Name":"logentries","Type":"Log"},{"Name":"splunk","Type":"Log"},{"Name":"syslog","Type":"Log"},{"Name":"bridge","Type":"Network"},{"Name":"host","Type":"Network"},{"Name":"ipvlan","Type":"Network"},{"Name":"macvlan","Type":"Network"},{"Name":"null","Type":"Network"},{"Name":"overlay","Type":"Network"},{"Name":"local","Type":"Volume"},{"Name":"localhost:5000/vieux/sshfs:latest","Type":"Volume"},{"Name":"vieux/sshfs:latest","Type":"Volume"}]}}},"ErrorDetail":{"type":"object","properties":{"code":{"type":"integer"},"message":{"type":"string"}}},"ErrorResponse":{"description":"Represents an error.","type":"object","required":["message"],"properties":{"message":{"description":"The error message.","type":"string","x-nullable":false}},"example":{"message":"Something went wrong."}},"GenericResources":{"description":"User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`)","type":"array","items":{"type":"object","properties":{"DiscreteResourceSpec":{"type":"object","properties":{"Kind":{"type":"string"},"Value":{"type":"integer","format":"int64"}}},"NamedResourceSpec":{"type":"object","properties":{"Kind":{"type":"string"},"Value":{"type":"string"}}}}},"example":[{"DiscreteResourceSpec":{"Kind":"SSD","Value":3}},{"NamedResourceSpec":{"Kind":"GPU","Value":"UUID1"}},{"NamedResourceSpec":{"Kind":"GPU","Value":"UUID2"}}]},"GraphDriverData":{"description":"Information about a container's graph 
driver.","type":"object","required":["Name","Data"],"properties":{"Data":{"type":"object","additionalProperties":{"type":"string"},"x-nullable":false},"Name":{"type":"string","x-nullable":false}}},"HealthConfig":{"description":"A test to perform to check that the container is healthy.","type":"object","properties":{"Interval":{"description":"The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit.","type":"integer"},"Retries":{"description":"The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit.","type":"integer"},"StartPeriod":{"description":"Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit.","type":"integer"},"Test":{"description":"The test to perform. Possible values are:\n\n- `[]` inherit healthcheck from image or parent image\n- `[\"NONE\"]` disable healthcheck\n- `[\"CMD\", args...]` exec arguments directly\n- `[\"CMD-SHELL\", command]` run command with system's default shell\n","type":"array","items":{"type":"string"}},"Timeout":{"description":"The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit.","type":"integer"}}},"HostConfig":{"description":"Container configuration that depends on the host we are running on","allOf":[{"$ref":"#/definitions/Resources"},{"type":"object","properties":{"AutoRemove":{"description":"Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set.","type":"boolean"},"Binds":{"description":"A list of volume bindings for this container. Each volume binding is a string in one of these forms:\n\n- `host-src:container-dest` to bind-mount a host path into the container. 
Both `host-src`, and `container-dest` must be an _absolute_ path.\n- `host-src:container-dest:ro` to make the bind mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path.\n- `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path.\n- `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path.\n","type":"array","items":{"type":"string"}},"CapAdd":{"description":"A list of kernel capabilities to add to the container.","type":"array","items":{"type":"string"}},"CapDrop":{"description":"A list of kernel capabilities to drop from the container.","type":"array","items":{"type":"string"}},"Cgroup":{"description":"Cgroup to use for the container.","type":"string"},"ConsoleSize":{"description":"Initial console size, as an `[height, width]` array. (Windows only)","type":"array","maxItems":2,"minItems":2,"items":{"type":"integer","minimum":0}},"ContainerIDFile":{"description":"Path to a file where the container ID is written","type":"string"},"Dns":{"description":"A list of DNS servers for the container to use.","type":"array","items":{"type":"string"}},"DnsOptions":{"description":"A list of DNS options.","type":"array","items":{"type":"string"}},"DnsSearch":{"description":"A list of DNS search domains.","type":"array","items":{"type":"string"}},"ExtraHosts":{"description":"A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `[\"hostname:IP\"]`.\n","type":"array","items":{"type":"string"}},"GroupAdd":{"description":"A list of additional groups that the container process will run as.","type":"array","items":{"type":"string"}},"IpcMode":{"description":"IPC sharing mode for the container. 
Possible values are:\n\n- `\"none\"`: own private IPC namespace, with /dev/shm not mounted\n- `\"private\"`: own private IPC namespace\n- `\"shareable\"`: own private IPC namespace, with a possibility to share it with other containers\n- `\"container:\"`: join another (shareable) container's IPC namespace\n- `\"host\"`: use the host system's IPC namespace\n\nIf not specified, daemon default is used, which can either be `\"private\"`\nor `\"shareable\"`, depending on daemon version and configuration.\n","type":"string"},"Isolation":{"description":"Isolation technology of the container. (Windows only)","type":"string","enum":["default","process","hyperv"]},"Links":{"description":"A list of links for the container in the form `container_name:alias`.","type":"array","items":{"type":"string"}},"LogConfig":{"description":"The logging configuration for this container","type":"object","properties":{"Config":{"type":"object","additionalProperties":{"type":"string"}},"Type":{"type":"string","enum":["json-file","syslog","journald","gelf","fluentd","awslogs","splunk","etwlogs","none"]}}},"MaskedPaths":{"description":"The list of paths to be masked inside the container (this overrides the default set of paths)","type":"array","items":{"type":"string"}},"Mounts":{"description":"Specification for mounts to be added to the container.","type":"array","items":{"$ref":"#/definitions/Mount"}},"NetworkMode":{"description":"Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken as a custom network's name to which this container should connect to.","type":"string"},"OomScoreAdj":{"description":"An integer value containing the score given to the container in order to tune OOM killer preferences.","type":"integer","example":500},"PidMode":{"description":"Set the PID (Process) Namespace mode for the container. 
It can be either:\n\n- `\"container:\"`: joins another container's PID namespace\n- `\"host\"`: use the host's PID namespace inside the container\n","type":"string"},"PortBindings":{"$ref":"#/definitions/PortMap"},"Privileged":{"description":"Gives the container full access to the host.","type":"boolean"},"PublishAllPorts":{"description":"Allocates an ephemeral host port for all of a container's\nexposed ports.\n\nPorts are de-allocated when the container stops and allocated when the container starts.\nThe allocated port might be changed when restarting the container.\n\nThe port is selected from the ephemeral port range that depends on the kernel.\nFor example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`.\n","type":"boolean"},"ReadonlyPaths":{"description":"The list of paths to be set as read-only inside the container (this overrides the default set of paths)","type":"array","items":{"type":"string"}},"ReadonlyRootfs":{"description":"Mount the container's root filesystem as read only.","type":"boolean"},"RestartPolicy":{"$ref":"#/definitions/RestartPolicy"},"Runtime":{"description":"Runtime to use with this container.","type":"string"},"SecurityOpt":{"description":"A list of string values to customize labels for MLS systems, such as SELinux.","type":"array","items":{"type":"string"}},"ShmSize":{"description":"Size of `/dev/shm` in bytes. If omitted, the system uses 64MB.","type":"integer","minimum":0},"StorageOpt":{"description":"Storage driver options for this container, in the form `{\"size\": \"120G\"}`.\n","type":"object","additionalProperties":{"type":"string"}},"Sysctls":{"description":"A list of kernel parameters (sysctls) to set in the container. For example: `{\"net.ipv4.ip_forward\": \"1\"}`\n","type":"object","additionalProperties":{"type":"string"}},"Tmpfs":{"description":"A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. 
For example: `{ \"/run\": \"rw,noexec,nosuid,size=65536k\" }`.\n","type":"object","additionalProperties":{"type":"string"}},"UTSMode":{"description":"UTS namespace to use for the container.","type":"string"},"UsernsMode":{"description":"Sets the usernamespace mode for the container when usernamespace remapping option is enabled.","type":"string"},"VolumeDriver":{"description":"Driver that this container uses to mount volumes.","type":"string"},"VolumesFrom":{"description":"A list of volumes to inherit from another container, specified in the form `[:]`.","type":"array","items":{"type":"string"}}}}]},"IPAM":{"type":"object","properties":{"Config":{"description":"List of IPAM configuration options, specified as a map: `{\"Subnet\": , \"IPRange\": , \"Gateway\": , \"AuxAddress\": }`","type":"array","items":{"type":"object","additionalProperties":{"type":"string"}}},"Driver":{"description":"Name of the IPAM driver to use.","type":"string","default":"default"},"Options":{"description":"Driver-specific options, specified as a map.","type":"array","items":{"type":"object","additionalProperties":{"type":"string"}}}}},"IdResponse":{"description":"Response to an API call that returns just an Id","type":"object","required":["Id"],"properties":{"Id":{"description":"The id of the newly created 
object.","type":"string","x-nullable":false}}},"Image":{"type":"object","required":["Id","Parent","Comment","Created","Container","DockerVersion","Author","Architecture","Os","Size","VirtualSize","GraphDriver","RootFS"],"properties":{"Architecture":{"type":"string","x-nullable":false},"Author":{"type":"string","x-nullable":false},"Comment":{"type":"string","x-nullable":false},"Config":{"$ref":"#/definitions/ContainerConfig"},"Container":{"type":"string","x-nullable":false},"ContainerConfig":{"$ref":"#/definitions/ContainerConfig"},"Created":{"type":"string","x-nullable":false},"DockerVersion":{"type":"string","x-nullable":false},"GraphDriver":{"$ref":"#/definitions/GraphDriverData"},"Id":{"type":"string","x-nullable":false},"Metadata":{"type":"object","properties":{"LastTagTime":{"type":"string","format":"dateTime"}}},"Os":{"type":"string","x-nullable":false},"OsVersion":{"type":"string"},"Parent":{"type":"string","x-nullable":false},"RepoDigests":{"type":"array","items":{"type":"string"}},"RepoTags":{"type":"array","items":{"type":"string"}},"RootFS":{"type":"object","required":["Type"],"properties":{"BaseLayer":{"type":"string"},"Layers":{"type":"array","items":{"type":"string"}},"Type":{"type":"string","x-nullable":false}}},"Size":{"type":"integer","format":"int64","x-nullable":false},"VirtualSize":{"type":"integer","format":"int64","x-nullable":false}}},"ImageDeleteResponseItem":{"type":"object","properties":{"Deleted":{"description":"The image ID of an image that was deleted","type":"string"},"Untagged":{"description":"The image ID of an image that was untagged","type":"string"}}},"ImageID":{"description":"Image ID or 
Digest","type":"object","properties":{"ID":{"type":"string"}},"example":{"ID":"sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c"}},"ImageSummary":{"type":"object","required":["Id","ParentId","RepoTags","RepoDigests","Created","Size","SharedSize","VirtualSize","Labels","Containers"],"properties":{"Containers":{"type":"integer","x-nullable":false},"Created":{"type":"integer","x-nullable":false},"Id":{"type":"string","x-nullable":false},"Labels":{"type":"object","additionalProperties":{"type":"string"},"x-nullable":false},"ParentId":{"type":"string","x-nullable":false},"RepoDigests":{"type":"array","items":{"type":"string"},"x-nullable":false},"RepoTags":{"type":"array","items":{"type":"string"},"x-nullable":false},"SharedSize":{"type":"integer","x-nullable":false},"Size":{"type":"integer","x-nullable":false},"VirtualSize":{"type":"integer","x-nullable":false}}},"IndexInfo":{"description":"IndexInfo contains information about a registry.","type":"object","properties":{"Mirrors":{"description":"List of mirrors, expressed as URIs.\n","type":"array","items":{"type":"string"},"example":["https://hub-mirror.corp.example.com:5000/","https://registry-2.docker.io/","https://registry-3.docker.io/"]},"Name":{"description":"Name of the registry, such as \"docker.io\".\n","type":"string","example":"docker.io"},"Official":{"description":"Indicates whether this is an official registry (i.e., Docker Hub / docker.io)\n","type":"boolean","example":true},"Secure":{"description":"Indicates if the registry is part of the list of insecure\nregistries.\n\nIf `false`, the registry is insecure. Insecure registries accept\nun-encrypted (HTTP) and/or untrusted (HTTPS with certificates from\nunknown CAs) communication.\n\n> **Warning**: Insecure registries can be useful when running a local\n> registry. However, because its use creates security vulnerabilities\n> it should ONLY be enabled for testing purposes. 
For increased\n> security, users should add their CA to their system's list of\n> trusted CAs instead of enabling this option.\n","type":"boolean","example":true}},"x-nullable":true},"JoinTokens":{"description":"JoinTokens contains the tokens workers and managers need to join the swarm.\n","type":"object","properties":{"Manager":{"description":"The token managers can use to join the swarm.\n","type":"string","example":"SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"},"Worker":{"description":"The token workers can use to join the swarm.\n","type":"string","example":"SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx"}}},"LocalNodeState":{"description":"Current local status of this node.","type":"string","default":"","enum":["","inactive","pending","active","error","locked"],"example":"active"},"ManagerStatus":{"description":"ManagerStatus represents the status of a manager.\n\nIt provides the current status of a node's manager component, if the node\nis a manager.\n","type":"object","properties":{"Addr":{"description":"The IP address and port at which the manager is reachable.\n","type":"string","example":"10.0.0.46:2377"},"Leader":{"type":"boolean","default":false,"example":true},"Reachability":{"$ref":"#/definitions/Reachability"}},"x-nullable":true},"Mount":{"type":"object","properties":{"BindOptions":{"description":"Optional configuration for the `bind` type.","type":"object","properties":{"Propagation":{"description":"A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`.","type":"string","enum":["private","rprivate","shared","rshared","slave","rslave"]}}},"Consistency":{"description":"The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`.","type":"string"},"ReadOnly":{"description":"Whether the mount should be read-only.","type":"boolean"},"Source":{"description":"Mount source (e.g. 
a volume name, a host path).","type":"string"},"Target":{"description":"Container path.","type":"string"},"TmpfsOptions":{"description":"Optional configuration for the `tmpfs` type.","type":"object","properties":{"Mode":{"description":"The permission mode for the tmpfs mount in an integer.","type":"integer"},"SizeBytes":{"description":"The size for the tmpfs mount in bytes.","type":"integer","format":"int64"}}},"Type":{"description":"The mount type. Available types:\n\n- `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container.\n- `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.\n- `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.\n","type":"string","enum":["bind","volume","tmpfs"]},"VolumeOptions":{"description":"Optional configuration for the `volume` type.","type":"object","properties":{"DriverConfig":{"description":"Map of driver specific options","type":"object","properties":{"Name":{"description":"Name of the driver to use to create the volume.","type":"string"},"Options":{"description":"key/value map of driver specific options.","type":"object","additionalProperties":{"type":"string"}}}},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"NoCopy":{"description":"Populate volume with data from the target.","type":"boolean","default":false}}}}},"MountPoint":{"description":"A mount point inside a 
container","type":"object","properties":{"Destination":{"type":"string"},"Driver":{"type":"string"},"Mode":{"type":"string"},"Name":{"type":"string"},"Propagation":{"type":"string"},"RW":{"type":"boolean"},"Source":{"type":"string"},"Type":{"type":"string"}}},"Network":{"type":"object","properties":{"Attachable":{"type":"boolean"},"Containers":{"type":"object","additionalProperties":{"$ref":"#/definitions/NetworkContainer"}},"Created":{"type":"string","format":"dateTime"},"Driver":{"type":"string"},"EnableIPv6":{"type":"boolean"},"IPAM":{"$ref":"#/definitions/IPAM"},"Id":{"type":"string"},"Ingress":{"type":"boolean"},"Internal":{"type":"boolean"},"Labels":{"type":"object","additionalProperties":{"type":"string"}},"Name":{"type":"string"},"Options":{"type":"object","additionalProperties":{"type":"string"}},"Scope":{"type":"string"}},"example":{"Attachable":false,"Containers":{"19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c":{"EndpointID":"628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a","IPv4Address":"172.19.0.2/16","IPv6Address":"","MacAddress":"02:42:ac:13:00:02","Name":"test"}},"Created":"2016-10-19T04:33:30.360899459Z","Driver":"bridge","EnableIPv6":false,"IPAM":{"Config":[{"Gateway":"172.19.0.1","Subnet":"172.19.0.0/16"}],"Driver":"default","Options":{"foo":"bar"}},"Id":"7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99","Ingress":false,"Internal":false,"Labels":{"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"},"Name":"net01","Options":{"com.docker.network.bridge.default_bridge":"true","com.docker.network.bridge.enable_icc":"true","com.docker.network.bridge.enable_ip_masquerade":"true","com.docker.network.bridge.host_binding_ipv4":"0.0.0.0","com.docker.network.bridge.name":"docker0","com.docker.network.driver.mtu":"1500"},"Scope":"local"}},"NetworkContainer":{"type":"object","properties":{"EndpointID":{"type":"string"},"IPv4Address":{"type":"string"},"IPv6Address":{"type":"
string"},"MacAddress":{"type":"string"},"Name":{"type":"string"}}},"NetworkSettings":{"description":"NetworkSettings exposes the network settings in the API","type":"object","properties":{"Bridge":{"description":"Name of the network's bridge (for example, `docker0`).","type":"string","example":"docker0"},"EndpointID":{"description":"EndpointID uniquely represents a service endpoint in a Sandbox.\n\n


    \n\n> **Deprecated**: This field is only propagated when attached to the\n> default \"bridge\" network. Use the information from the \"bridge\"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n","type":"string","example":"b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"},"Gateway":{"description":"Gateway address for the default \"bridge\" network.\n\n


    \n\n> **Deprecated**: This field is only propagated when attached to the\n> default \"bridge\" network. Use the information from the \"bridge\"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n","type":"string","example":"172.17.0.1"},"GlobalIPv6Address":{"description":"Global IPv6 address for the default \"bridge\" network.\n\n


    \n\n> **Deprecated**: This field is only propagated when attached to the\n> default \"bridge\" network. Use the information from the \"bridge\"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n","type":"string","example":"2001:db8::5689"},"GlobalIPv6PrefixLen":{"description":"Mask length of the global IPv6 address.\n\n


    \n\n> **Deprecated**: This field is only propagated when attached to the\n> default \"bridge\" network. Use the information from the \"bridge\"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n","type":"integer","example":64},"HairpinMode":{"description":"Indicates if hairpin NAT should be enabled on the virtual interface.\n","type":"boolean","example":false},"IPAddress":{"description":"IPv4 address for the default \"bridge\" network.\n\n


    \n\n> **Deprecated**: This field is only propagated when attached to the\n> default \"bridge\" network. Use the information from the \"bridge\"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n","type":"string","example":"172.17.0.4"},"IPPrefixLen":{"description":"Mask length of the IPv4 address.\n\n


    \n\n> **Deprecated**: This field is only propagated when attached to the\n> default \"bridge\" network. Use the information from the \"bridge\"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n","type":"integer","example":16},"IPv6Gateway":{"description":"IPv6 gateway address for this network.\n\n


    \n\n> **Deprecated**: This field is only propagated when attached to the\n> default \"bridge\" network. Use the information from the \"bridge\"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n","type":"string","example":"2001:db8:2::100"},"LinkLocalIPv6Address":{"description":"IPv6 unicast address using the link-local prefix.","type":"string","example":"fe80::42:acff:fe11:1"},"LinkLocalIPv6PrefixLen":{"description":"Prefix length of the IPv6 unicast address.","type":"integer","example":64},"MacAddress":{"description":"MAC address for the container on the default \"bridge\" network.\n\n


    \n\n> **Deprecated**: This field is only propagated when attached to the\n> default \"bridge\" network. Use the information from the \"bridge\"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n","type":"string","example":"02:42:ac:11:00:04"},"Networks":{"description":"Information about all networks that the container is connected to.\n","type":"object","additionalProperties":{"$ref":"#/definitions/EndpointSettings"}},"Ports":{"$ref":"#/definitions/PortMap"},"SandboxID":{"description":"SandboxID uniquely represents a container's network stack.","type":"string","example":"9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3"},"SandboxKey":{"description":"SandboxKey identifies the sandbox","type":"string","example":"/var/run/docker/netns/8ab54b426c38"},"SecondaryIPAddresses":{"type":"array","items":{"$ref":"#/definitions/Address"},"x-nullable":true},"SecondaryIPv6Addresses":{"type":"array","items":{"$ref":"#/definitions/Address"},"x-nullable":true}}},"Node":{"type":"object","properties":{"CreatedAt":{"description":"Date and time at which the node was added to the swarm in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n","type":"string","format":"dateTime","example":"2016-08-18T10:44:24.496525531Z"},"Description":{"$ref":"#/definitions/NodeDescription"},"ID":{"type":"string","example":"24ifsmvkjbyhk"},"ManagerStatus":{"$ref":"#/definitions/ManagerStatus"},"Spec":{"$ref":"#/definitions/NodeSpec"},"Status":{"$ref":"#/definitions/NodeStatus"},"UpdatedAt":{"description":"Date and time at which the node was last updated in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n","type":"string","format":"dateTime","example":"2017-08-09T07:09:37.632105588Z"},"Version":{"$ref":"#/definitions/ObjectVersion"}}},"NodeDescription":{"description":"NodeDescription encapsulates the 
properties of the Node as reported by the\nagent.\n","type":"object","properties":{"Engine":{"$ref":"#/definitions/EngineDescription"},"Hostname":{"type":"string","example":"bf3067039e47"},"Platform":{"$ref":"#/definitions/Platform"},"Resources":{"$ref":"#/definitions/ResourceObject"},"TLSInfo":{"$ref":"#/definitions/TLSInfo"}}},"NodeSpec":{"type":"object","properties":{"Availability":{"description":"Availability of the node.","type":"string","enum":["active","pause","drain"],"example":"active"},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"Name":{"description":"Name for the node.","type":"string","example":"my-node"},"Role":{"description":"Role of the node.","type":"string","enum":["worker","manager"],"example":"manager"}},"example":{"Availability":"active","Labels":{"foo":"bar"},"Name":"node-name","Role":"manager"}},"NodeState":{"description":"NodeState represents the state of a node.","type":"string","enum":["unknown","down","ready","disconnected"],"example":"ready"},"NodeStatus":{"description":"NodeStatus represents the status of a node.\n\nIt provides the current status of the node, as seen by the manager.\n","type":"object","properties":{"Addr":{"description":"IP address of the node.","type":"string","example":"172.17.0.2"},"Message":{"type":"string","example":""},"State":{"$ref":"#/definitions/NodeState"}}},"ObjectVersion":{"description":"The version number of the object such as node, service, etc. This is needed to avoid conflicting writes.\nThe client must send the version number along with the modified specification when updating these objects.\nThis approach ensures safe concurrency and determinism in that the change on the object\nmay not be applied if the version number has changed from the last read. 
In other words,\nif two update requests specify the same base version, only one of the requests can succeed.\nAs a result, two separate update requests that happen at the same time will not\nunintentionally overwrite each other.\n","type":"object","properties":{"Index":{"type":"integer","format":"uint64","example":373531}}},"PeerNode":{"description":"Represents a peer-node in the swarm","properties":{"Addr":{"description":"IP address and ports at which this node can be reached.\n","type":"string"},"NodeID":{"description":"Unique identifier of for this node in the swarm.","type":"string"}}},"Platform":{"description":"Platform represents the platform (Arch/OS).\n","type":"object","properties":{"Architecture":{"description":"Architecture represents the hardware architecture (for example,\n`x86_64`).\n","type":"string","example":"x86_64"},"OS":{"description":"OS represents the Operating System (for example, `linux` or `windows`).\n","type":"string","example":"linux"}}},"Plugin":{"description":"A plugin for the Engine API","type":"object","required":["Settings","Enabled","Config","Name"],"properties":{"Config":{"description":"The config of a plugin.","type":"object","required":["Description","Documentation","Interface","Entrypoint","WorkDir","Network","Linux","PidHost","PropagatedMount","IpcHost","Mounts","Env","Args"],"properties":{"Args":{"type":"object","required":["Name","Description","Settable","Value"],"properties":{"Description":{"type":"string","x-nullable":false,"example":"command line arguments"},"Name":{"type":"string","x-nullable":false,"example":"args"},"Settable":{"type":"array","items":{"type":"string"}},"Value":{"type":"array","items":{"type":"string"}}},"x-nullable":false},"Description":{"type":"string","x-nullable":false,"example":"A sample volume plugin for Docker"},"DockerVersion":{"description":"Docker Version used to create the 
plugin","type":"string","x-nullable":false,"example":"17.06.0-ce"},"Documentation":{"type":"string","x-nullable":false,"example":"https://docs.docker.com/engine/extend/plugins/"},"Entrypoint":{"type":"array","items":{"type":"string"},"example":["/usr/bin/sample-volume-plugin","/data"]},"Env":{"type":"array","items":{"$ref":"#/definitions/PluginEnv"},"example":[{"Description":"If set, prints debug messages","Name":"DEBUG","Settable":null,"Value":"0"}]},"Interface":{"description":"The interface between Docker and the plugin","type":"object","required":["Types","Socket"],"properties":{"ProtocolScheme":{"description":"Protocol to use for clients connecting to the plugin.","type":"string","enum":["","moby.plugins.http/v1"],"example":"some.protocol/v1.0"},"Socket":{"type":"string","x-nullable":false,"example":"plugins.sock"},"Types":{"type":"array","items":{"$ref":"#/definitions/PluginInterfaceType"},"example":["docker.volumedriver/1.0"]}},"x-nullable":false},"IpcHost":{"type":"boolean","x-nullable":false,"example":false},"Linux":{"type":"object","required":["Capabilities","AllowAllDevices","Devices"],"properties":{"AllowAllDevices":{"type":"boolean","x-nullable":false,"example":false},"Capabilities":{"type":"array","items":{"type":"string"},"example":["CAP_SYS_ADMIN","CAP_SYSLOG"]},"Devices":{"type":"array","items":{"$ref":"#/definitions/PluginDevice"}}},"x-nullable":false},"Mounts":{"type":"array","items":{"$ref":"#/definitions/PluginMount"}},"Network":{"type":"object","required":["Type"],"properties":{"Type":{"type":"string","x-nullable":false,"example":"host"}},"x-nullable":false},"PidHost":{"type":"boolean","x-nullable":false,"example":false},"PropagatedMount":{"type":"string","x-nullable":false,"example":"/mnt/volumes"},"User":{"type":"object","properties":{"GID":{"type":"integer","format":"uint32","example":1000},"UID":{"type":"integer","format":"uint32","example":1000}},"x-nullable":false},"WorkDir":{"type":"string","x-nullable":false,"example":"/bin/"},"rootfs":{
"type":"object","properties":{"diff_ids":{"type":"array","items":{"type":"string"},"example":["sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887","sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"]},"type":{"type":"string","example":"layers"}}}},"x-nullable":false},"Enabled":{"description":"True if the plugin is running. False if the plugin is not running, only installed.","type":"boolean","x-nullable":false,"example":true},"Id":{"type":"string","example":"5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078"},"Name":{"type":"string","x-nullable":false,"example":"tiborvass/sample-volume-plugin"},"PluginReference":{"description":"plugin remote reference used to push/pull the plugin","type":"string","x-nullable":false,"example":"localhost:5000/tiborvass/sample-volume-plugin:latest"},"Settings":{"description":"Settings that can be modified by users.","type":"object","required":["Args","Devices","Env","Mounts"],"properties":{"Args":{"type":"array","items":{"type":"string"}},"Devices":{"type":"array","items":{"$ref":"#/definitions/PluginDevice"}},"Env":{"type":"array","items":{"type":"string"},"example":["DEBUG=0"]},"Mounts":{"type":"array","items":{"$ref":"#/definitions/PluginMount"}}},"x-nullable":false}}},"PluginDevice":{"type":"object","required":["Name","Description","Settable","Path"],"properties":{"Description":{"type":"string","x-nullable":false},"Name":{"type":"string","x-nullable":false},"Path":{"type":"string","example":"/dev/fuse"},"Settable":{"type":"array","items":{"type":"string"}}},"x-nullable":false},"PluginEnv":{"type":"object","required":["Name","Description","Settable","Value"],"properties":{"Description":{"type":"string","x-nullable":false},"Name":{"type":"string","x-nullable":false},"Settable":{"type":"array","items":{"type":"string"}},"Value":{"type":"string"}},"x-nullable":false},"PluginInterfaceType":{"type":"object","required":["Prefix","Capability","Version"],"properties":{"Capability":{"
type":"string","x-nullable":false},"Prefix":{"type":"string","x-nullable":false},"Version":{"type":"string","x-nullable":false}},"x-nullable":false},"PluginMount":{"type":"object","required":["Name","Description","Settable","Source","Destination","Type","Options"],"properties":{"Description":{"type":"string","x-nullable":false,"example":"This is a mount that's used by the plugin."},"Destination":{"type":"string","x-nullable":false,"example":"/mnt/state"},"Name":{"type":"string","x-nullable":false,"example":"some-mount"},"Options":{"type":"array","items":{"type":"string"},"example":["rbind","rw"]},"Settable":{"type":"array","items":{"type":"string"}},"Source":{"type":"string","example":"/var/lib/docker/plugins/"},"Type":{"type":"string","x-nullable":false,"example":"bind"}},"x-nullable":false},"PluginsInfo":{"description":"Available plugins per type.\n\n


    \n\n> **Note**: Only unmanaged (V1) plugins are included in this list.\n> V1 plugins are \"lazily\" loaded, and are not returned in this list\n> if there is no resource using the plugin.\n","type":"object","properties":{"Authorization":{"description":"Names of available authorization plugins.","type":"array","items":{"type":"string"},"example":["img-authz-plugin","hbm"]},"Log":{"description":"Names of available logging-drivers, and logging-driver plugins.","type":"array","items":{"type":"string"},"example":["awslogs","fluentd","gcplogs","gelf","journald","json-file","logentries","splunk","syslog"]},"Network":{"description":"Names of available network-drivers, and network-driver plugins.","type":"array","items":{"type":"string"},"example":["bridge","host","ipvlan","macvlan","null","overlay"]},"Volume":{"description":"Names of available volume-drivers, and volume-driver plugins.","type":"array","items":{"type":"string"},"example":["local"]}}},"Port":{"description":"An open port on a container","type":"object","required":["PrivatePort","Type"],"properties":{"IP":{"description":"Host IP address that the container's port is mapped to","type":"string","format":"ip-address"},"PrivatePort":{"description":"Port on the container","type":"integer","format":"uint16","x-nullable":false},"PublicPort":{"description":"Port exposed on the host","type":"integer","format":"uint16"},"Type":{"type":"string","enum":["tcp","udp","sctp"],"x-nullable":false}},"example":{"PrivatePort":8080,"PublicPort":80,"Type":"tcp"}},"PortBinding":{"description":"PortBinding represents a binding between a host IP address and a host\nport.\n","type":"object","properties":{"HostIp":{"description":"Host IP address that the container's port is mapped to.","type":"string","example":"127.0.0.1"},"HostPort":{"description":"Host port number that the container's port is mapped to.","type":"string","example":"4443"}},"x-nullable":true},"PortMap":{"description":"PortMap describes the mapping of container ports 
to host ports, using the\ncontainer's port-number and protocol as key in the format `<port>/<protocol>`,\nfor example, `80/udp`.\n\nIf a container's port is mapped for multiple protocols, separate entries\nare added to the mapping table.\n","type":"object","additionalProperties":{"type":"array","items":{"$ref":"#/definitions/PortBinding"}},"example":{"2377/tcp":null,"443/tcp":[{"HostIp":"127.0.0.1","HostPort":"4443"}],"53/udp":[{"HostIp":"0.0.0.0","HostPort":"53"}],"80/tcp":[{"HostIp":"0.0.0.0","HostPort":"80"},{"HostIp":"0.0.0.0","HostPort":"8080"}],"80/udp":[{"HostIp":"0.0.0.0","HostPort":"80"}]}},"ProcessConfig":{"type":"object","properties":{"arguments":{"type":"array","items":{"type":"string"}},"entrypoint":{"type":"string"},"privileged":{"type":"boolean"},"tty":{"type":"boolean"},"user":{"type":"string"}}},"ProgressDetail":{"type":"object","properties":{"current":{"type":"integer"},"total":{"type":"integer"}}},"PushImageInfo":{"type":"object","properties":{"error":{"type":"string"},"progress":{"type":"string"},"progressDetail":{"$ref":"#/definitions/ProgressDetail"},"status":{"type":"string"}}},"Reachability":{"description":"Reachability represents the reachability of a node.","type":"string","enum":["unknown","unreachable","reachable"],"example":"reachable"},"RegistryServiceConfig":{"description":"RegistryServiceConfig stores daemon registry services configuration.\n","type":"object","properties":{"AllowNondistributableArtifactsCIDRs":{"description":"List of IP ranges to which nondistributable artifacts can be pushed,\nusing the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632).\n\nSome images (for example, Windows base images) contain artifacts\nwhose distribution is restricted by license. 
When these images are\npushed to a registry, restricted artifacts are not included.\n\nThis configuration override this behavior, and enables the daemon to\npush nondistributable artifacts to all registries whose resolved IP\naddress is within the subnet described by the CIDR syntax.\n\nThis option is useful when pushing images containing\nnondistributable artifacts to a registry on an air-gapped network so\nhosts on that network can pull the images without connecting to\nanother server.\n\n> **Warning**: Nondistributable artifacts typically have restrictions\n> on how and where they can be distributed and shared. Only use this\n> feature to push artifacts to private registries and ensure that you\n> are in compliance with any terms that cover redistributing\n> nondistributable artifacts.\n","type":"array","items":{"type":"string"},"example":["::1/128","127.0.0.0/8"]},"AllowNondistributableArtifactsHostnames":{"description":"List of registry hostnames to which nondistributable artifacts can be\npushed, using the format `[:]` or `[:]`.\n\nSome images (for example, Windows base images) contain artifacts\nwhose distribution is restricted by license. When these images are\npushed to a registry, restricted artifacts are not included.\n\nThis configuration override this behavior for the specified\nregistries.\n\nThis option is useful when pushing images containing\nnondistributable artifacts to a registry on an air-gapped network so\nhosts on that network can pull the images without connecting to\nanother server.\n\n> **Warning**: Nondistributable artifacts typically have restrictions\n> on how and where they can be distributed and shared. 
Only use this\n> feature to push artifacts to private registries and ensure that you\n> are in compliance with any terms that cover redistributing\n> nondistributable artifacts.\n","type":"array","items":{"type":"string"},"example":["registry.internal.corp.example.com:3000","[2001:db8:a0b:12f0::1]:443"]},"IndexConfigs":{"type":"object","additionalProperties":{"$ref":"#/definitions/IndexInfo"},"example":{"127.0.0.1:5000":{"Mirrors":[],"Name":"127.0.0.1:5000","Official":false,"Secure":false},"[2001:db8:a0b:12f0::1]:80":{"Mirrors":[],"Name":"[2001:db8:a0b:12f0::1]:80","Official":false,"Secure":false},"docker.io":{"Mirrors":["https://hub-mirror.corp.example.com:5000/"],"Name":"docker.io","Official":true,"Secure":true},"registry.internal.corp.example.com:3000":{"Mirrors":[],"Name":"registry.internal.corp.example.com:3000","Official":false,"Secure":false}}},"InsecureRegistryCIDRs":{"description":"List of IP ranges of insecure registries, using the CIDR syntax\n([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries\naccept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates\nfrom unknown CAs) communication.\n\nBy default, local registries (`127.0.0.0/8`) are configured as\ninsecure. All other registries are secure. Communicating with an\ninsecure registry is not possible if the daemon assumes that registry\nis secure.\n\nThis configuration override this behavior, insecure communication with\nregistries whose resolved IP address is within the subnet described by\nthe CIDR syntax.\n\nRegistries can also be marked insecure by hostname. Those registries\nare listed under `IndexConfigs` and have their `Secure` field set to\n`false`.\n\n> **Warning**: Using this option can be useful when running a local\n> registry, but introduces security vulnerabilities. This option\n> should therefore ONLY be used for testing purposes. 
For increased\n> security, users should add their CA to their system's list of trusted\n> CAs instead of enabling this option.\n","type":"array","items":{"type":"string"},"example":["::1/128","127.0.0.0/8"]},"Mirrors":{"description":"List of registry URLs that act as a mirror for the official\n(`docker.io`) registry.\n","type":"array","items":{"type":"string"},"example":["https://hub-mirror.corp.example.com:5000/","https://[2001:db8:a0b:12f0::1]/"]}},"x-nullable":true},"ResourceObject":{"description":"An object describing the resources which can be advertised by a node and requested by a task","type":"object","properties":{"GenericResources":{"$ref":"#/definitions/GenericResources"},"MemoryBytes":{"type":"integer","format":"int64","example":8272408576},"NanoCPUs":{"type":"integer","format":"int64","example":4000000000}}},"Resources":{"description":"A container's resources (cgroups config, ulimits, etc)","type":"object","properties":{"BlkioDeviceReadBps":{"description":"Limit read rate (bytes per second) from a device, in the form `[{\"Path\": \"device_path\", \"Rate\": rate}]`.\n","type":"array","items":{"$ref":"#/definitions/ThrottleDevice"}},"BlkioDeviceReadIOps":{"description":"Limit read rate (IO per second) from a device, in the form `[{\"Path\": \"device_path\", \"Rate\": rate}]`.\n","type":"array","items":{"$ref":"#/definitions/ThrottleDevice"}},"BlkioDeviceWriteBps":{"description":"Limit write rate (bytes per second) to a device, in the form `[{\"Path\": \"device_path\", \"Rate\": rate}]`.\n","type":"array","items":{"$ref":"#/definitions/ThrottleDevice"}},"BlkioDeviceWriteIOps":{"description":"Limit write rate (IO per second) to a device, in the form `[{\"Path\": \"device_path\", \"Rate\": rate}]`.\n","type":"array","items":{"$ref":"#/definitions/ThrottleDevice"}},"BlkioWeight":{"description":"Block IO weight (relative weight).","type":"integer","maximum":1000,"minimum":0},"BlkioWeightDevice":{"description":"Block IO weight (relative device weight) in the 
form `[{\"Path\": \"device_path\", \"Weight\": weight}]`.\n","type":"array","items":{"type":"object","properties":{"Path":{"type":"string"},"Weight":{"type":"integer","minimum":0}}}},"CgroupParent":{"description":"Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.","type":"string"},"CpuCount":{"description":"The number of usable CPUs (Windows only).\n\nOn Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last.\n","type":"integer","format":"int64"},"CpuPercent":{"description":"The usable percentage of the available CPUs (Windows only).\n\nOn Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last.\n","type":"integer","format":"int64"},"CpuPeriod":{"description":"The length of a CPU period in microseconds.","type":"integer","format":"int64"},"CpuQuota":{"description":"Microseconds of CPU time that the container can get in a CPU period.","type":"integer","format":"int64"},"CpuRealtimePeriod":{"description":"The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks.","type":"integer","format":"int64"},"CpuRealtimeRuntime":{"description":"The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks.","type":"integer","format":"int64"},"CpuShares":{"description":"An integer value representing this container's relative CPU weight versus other containers.","type":"integer"},"CpusetCpus":{"description":"CPUs in which to allow execution (e.g., `0-3`, `0,1`)","type":"string","example":"0-3"},"CpusetMems":{"description":"Memory nodes (MEMs) in which to allow execution (0-3, 0,1). 
Only effective on NUMA systems.","type":"string"},"DeviceCgroupRules":{"description":"a list of cgroup rules to apply to the container","type":"array","items":{"type":"string","example":"c 13:* rwm"}},"Devices":{"description":"A list of devices to add to the container.","type":"array","items":{"$ref":"#/definitions/DeviceMapping"}},"DiskQuota":{"description":"Disk limit (in bytes).","type":"integer","format":"int64"},"IOMaximumBandwidth":{"description":"Maximum IO in bytes per second for the container system drive (Windows only)","type":"integer","format":"int64"},"IOMaximumIOps":{"description":"Maximum IOps for the container system drive (Windows only)","type":"integer","format":"int64"},"Init":{"description":"Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used.","type":"boolean","x-nullable":true},"KernelMemory":{"description":"Kernel memory limit in bytes.","type":"integer","format":"int64"},"Memory":{"description":"Memory limit in bytes.","type":"integer","format":"int64","default":0},"MemoryReservation":{"description":"Memory soft limit in bytes.","type":"integer","format":"int64"},"MemorySwap":{"description":"Total memory limit (memory + swap). Set as `-1` to enable unlimited swap.","type":"integer","format":"int64"},"MemorySwappiness":{"description":"Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.","type":"integer","format":"int64","maximum":100,"minimum":0},"NanoCPUs":{"description":"CPU quota in units of 10-9 CPUs.","type":"integer","format":"int64"},"OomKillDisable":{"description":"Disable OOM Killer for the container.","type":"boolean"},"PidsLimit":{"description":"Tune a container's pids limit. Set -1 for unlimited.","type":"integer","format":"int64"},"Ulimits":{"description":"A list of resource limits to set in the container. 
For example: `{\"Name\": \"nofile\", \"Soft\": 1024, \"Hard\": 2048}`\"\n","type":"array","items":{"type":"object","properties":{"Hard":{"description":"Hard limit","type":"integer"},"Name":{"description":"Name of ulimit","type":"string"},"Soft":{"description":"Soft limit","type":"integer"}}}}}},"RestartPolicy":{"description":"The behavior to apply when the container exits. The default is not to restart.\n\nAn ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server.\n","type":"object","properties":{"MaximumRetryCount":{"description":"If `on-failure` is used, the number of times to retry before giving up","type":"integer"},"Name":{"description":"- Empty string means not to restart\n- `always` Always restart\n- `unless-stopped` Restart always except when the user has manually stopped the container\n- `on-failure` Restart only when the container exit code is non-zero\n","type":"string","enum":["","always","unless-stopped","on-failure"]}}},"Runtime":{"description":"Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec)\nruntime.\n\nThe runtime is invoked by the daemon via the `containerd` daemon. 
OCI\nruntimes act as an interface to the Linux kernel namespaces, cgroups,\nand SELinux.\n","type":"object","properties":{"path":{"description":"Name and, optional, path, of the OCI executable binary.\n\nIf the path is omitted, the daemon searches the host's `$PATH` for the\nbinary and uses the first result.\n","type":"string","example":"/usr/local/bin/my-oci-runtime"},"runtimeArgs":{"description":"List of command-line arguments to pass to the runtime when invoked.\n","type":"array","items":{"type":"string"},"x-nullable":true,"example":["--debug","--systemd-cgroup=false"]}}},"Secret":{"type":"object","properties":{"CreatedAt":{"type":"string","format":"dateTime","example":"2017-07-20T13:55:28.678958722Z"},"ID":{"type":"string","example":"blt1owaxmitz71s9v5zh81zun"},"Spec":{"$ref":"#/definitions/SecretSpec"},"UpdatedAt":{"type":"string","format":"dateTime","example":"2017-07-20T13:55:28.678958722Z"},"Version":{"$ref":"#/definitions/ObjectVersion"}}},"SecretSpec":{"type":"object","properties":{"Data":{"description":"Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2))\ndata to store as secret.\n\nThis field is only used to _create_ a secret, and is not returned by\nother endpoints.\n","type":"string","example":""},"Driver":{"description":"Name of the secrets driver used to fetch the secret's value from an external secret store","$ref":"#/definitions/Driver"},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"},"example":{"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"}},"Name":{"description":"User-defined name of the secret.","type":"string"},"Templating":{"description":"Templating driver, if applicable\n\nTemplating controls whether and how to evaluate the config payload as\na template. 
If no driver is set, no templating is used.\n","$ref":"#/definitions/Driver"}}},"Service":{"type":"object","properties":{"CreatedAt":{"type":"string","format":"dateTime"},"Endpoint":{"type":"object","properties":{"Ports":{"type":"array","items":{"$ref":"#/definitions/EndpointPortConfig"}},"Spec":{"$ref":"#/definitions/EndpointSpec"},"VirtualIPs":{"type":"array","items":{"type":"object","properties":{"Addr":{"type":"string"},"NetworkID":{"type":"string"}}}}}},"ID":{"type":"string"},"Spec":{"$ref":"#/definitions/ServiceSpec"},"UpdateStatus":{"description":"The status of a service update.","type":"object","properties":{"CompletedAt":{"type":"string","format":"dateTime"},"Message":{"type":"string"},"StartedAt":{"type":"string","format":"dateTime"},"State":{"type":"string","enum":["updating","paused","completed"]}}},"UpdatedAt":{"type":"string","format":"dateTime"},"Version":{"$ref":"#/definitions/ObjectVersion"}},"example":{"CreatedAt":"2016-06-07T21:05:51.880065305Z","Endpoint":{"Ports":[{"Protocol":"tcp","PublishedPort":30001,"TargetPort":6379}],"Spec":{"Mode":"vip","Ports":[{"Protocol":"tcp","PublishedPort":30001,"TargetPort":6379}]},"VirtualIPs":[{"Addr":"10.255.0.2/16","NetworkID":"4qvuz4ko70xaltuqbt8956gd1"},{"Addr":"10.255.0.3/16","NetworkID":"4qvuz4ko70xaltuqbt8956gd1"}]},"ID":"9mnpnzenvg8p8tdbtq4wvbkcz","Spec":{"EndpointSpec":{"Mode":"vip","Ports":[{"Protocol":"tcp","PublishedPort":30001,"TargetPort":6379}]},"Mode":{"Replicated":{"Replicas":1}},"Name":"hopeful_cori","RollbackConfig":{"Delay":1000000000,"FailureAction":"pause","MaxFailureRatio":0.15,"Monitor":15000000000,"Parallelism":1},"TaskTemplate":{"ContainerSpec":{"Image":"redis"},"ForceUpdate":0,"Placement":{},"Resources":{"Limits":{},"Reservations":{}},"RestartPolicy":{"Condition":"any","MaxAttempts":0}},"UpdateConfig":{"Delay":1000000000,"FailureAction":"pause","MaxFailureRatio":0.15,"Monitor":15000000000,"Parallelism":1}},"UpdatedAt":"2016-06-07T21:07:29.962229872Z","Version":{"Index":19}}},"ServiceSpe
c":{"description":"User modifiable configuration for a service.","properties":{"EndpointSpec":{"$ref":"#/definitions/EndpointSpec"},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"Mode":{"description":"Scheduling mode for the service.","type":"object","properties":{"Global":{"type":"object"},"Replicated":{"type":"object","properties":{"Replicas":{"type":"integer","format":"int64"}}}}},"Name":{"description":"Name of the service.","type":"string"},"Networks":{"description":"Array of network names or IDs to attach the service to.","type":"array","items":{"type":"object","properties":{"Aliases":{"type":"array","items":{"type":"string"}},"Target":{"type":"string"}}}},"RollbackConfig":{"description":"Specification for the rollback strategy of the service.","type":"object","properties":{"Delay":{"description":"Amount of time between rollback iterations, in nanoseconds.","type":"integer","format":"int64"},"FailureAction":{"description":"Action to take if a rolled back task fails to run, or stops running during the rollback.","type":"string","enum":["continue","pause"]},"MaxFailureRatio":{"description":"The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1.","type":"number","default":0},"Monitor":{"description":"Amount of time to monitor each rolled back task for failures, in nanoseconds.","type":"integer","format":"int64"},"Order":{"description":"The order of operations when rolling back a task. 
Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down.","type":"string","enum":["stop-first","start-first"]},"Parallelism":{"description":"Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism).","type":"integer","format":"int64"}}},"TaskTemplate":{"$ref":"#/definitions/TaskSpec"},"UpdateConfig":{"description":"Specification for the update strategy of the service.","type":"object","properties":{"Delay":{"description":"Amount of time between updates, in nanoseconds.","type":"integer","format":"int64"},"FailureAction":{"description":"Action to take if an updated task fails to run, or stops running during the update.","type":"string","enum":["continue","pause","rollback"]},"MaxFailureRatio":{"description":"The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1.","type":"number","default":0},"Monitor":{"description":"Amount of time to monitor each updated task for failures, in nanoseconds.","type":"integer","format":"int64"},"Order":{"description":"The order of operations when rolling out an updated task. 
Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down.","type":"string","enum":["stop-first","start-first"]},"Parallelism":{"description":"Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism).","type":"integer","format":"int64"}}}}},"ServiceUpdateResponse":{"type":"object","properties":{"Warnings":{"description":"Optional warning messages","type":"array","items":{"type":"string"}}},"example":{"Warning":"unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"}},"Swarm":{"type":"object","allOf":[{"$ref":"#/definitions/ClusterInfo"},{"type":"object","properties":{"JoinTokens":{"$ref":"#/definitions/JoinTokens"}}}]},"SwarmInfo":{"description":"Represents generic information about swarm.\n","type":"object","properties":{"Cluster":{"$ref":"#/definitions/ClusterInfo"},"ControlAvailable":{"type":"boolean","default":false,"example":true},"Error":{"type":"string","default":""},"LocalNodeState":{"$ref":"#/definitions/LocalNodeState"},"Managers":{"description":"Total number of managers in the swarm.","type":"integer","x-nullable":true,"example":3},"NodeAddr":{"description":"IP address at which this node can be reached by other nodes in the\nswarm.\n","type":"string","default":"","example":"10.0.0.46"},"NodeID":{"description":"Unique identifier of for this node in the swarm.","type":"string","default":"","example":"k67qz4598weg5unwwffg6z1m1"},"Nodes":{"description":"Total number of nodes in the swarm.","type":"integer","x-nullable":true,"example":4},"RemoteManagers":{"description":"List of ID's and addresses of other managers in the 
swarm.\n","type":"array","items":{"$ref":"#/definitions/PeerNode"},"x-nullable":true,"example":[{"Addr":"10.0.0.158:2377","NodeID":"71izy0goik036k48jg985xnds"},{"Addr":"10.0.0.159:2377","NodeID":"79y6h1o4gv8n120drcprv5nmc"},{"Addr":"10.0.0.46:2377","NodeID":"k67qz4598weg5unwwffg6z1m1"}]}}},"SwarmSpec":{"description":"User modifiable swarm configuration.","type":"object","properties":{"CAConfig":{"description":"CA configuration.","type":"object","properties":{"ExternalCAs":{"description":"Configuration for forwarding signing requests to an external certificate authority.","type":"array","items":{"type":"object","properties":{"CACert":{"description":"The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided).","type":"string"},"Options":{"description":"An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver.","type":"object","additionalProperties":{"type":"string"}},"Protocol":{"description":"Protocol for communication with the external CA (currently only `cfssl` is supported).","type":"string","default":"cfssl","enum":["cfssl"]},"URL":{"description":"URL where certificate signing requests should be sent.","type":"string"}}}},"ForceRotate":{"description":"An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey`","type":"integer","format":"uint64"},"NodeCertExpiry":{"description":"The duration node certificates are issued for.","type":"integer","format":"int64","example":7776000000000000},"SigningCACert":{"description":"The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.","type":"string"},"SigningCAKey":{"description":"The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.","type":"string"}},"x-nullable":true},"Dispatcher":{"description":"Dispatcher 
configuration.","type":"object","properties":{"HeartbeatPeriod":{"description":"The delay for an agent to send a heartbeat to the dispatcher.","type":"integer","format":"int64","example":5000000000}},"x-nullable":true},"EncryptionConfig":{"description":"Parameters related to encryption-at-rest.","type":"object","properties":{"AutoLockManagers":{"description":"If set, generate a key and use it to lock data stored on the managers.","type":"boolean","example":false}}},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"},"example":{"com.example.corp.department":"engineering","com.example.corp.type":"production"}},"Name":{"description":"Name of the swarm.","type":"string","example":"default"},"Orchestration":{"description":"Orchestration configuration.","type":"object","properties":{"TaskHistoryRetentionLimit":{"description":"The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks.","type":"integer","format":"int64","example":10}},"x-nullable":true},"Raft":{"description":"Raft configuration.","type":"object","properties":{"ElectionTick":{"description":"The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`.\n\nA tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed.\n","type":"integer","example":3},"HeartbeatTick":{"description":"The number of ticks between heartbeats. 
Every HeartbeatTick ticks, the leader will send a heartbeat to the followers.\n\nA tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed.\n","type":"integer","example":1},"KeepOldSnapshots":{"description":"The number of snapshots to keep beyond the current snapshot.","type":"integer","format":"uint64"},"LogEntriesForSlowFollowers":{"description":"The number of log entries to keep around to sync up slow followers after a snapshot is created.","type":"integer","format":"uint64","example":500},"SnapshotInterval":{"description":"The number of log entries between snapshots.","type":"integer","format":"uint64","example":10000}}},"TaskDefaults":{"description":"Defaults for creating tasks in this cluster.","type":"object","properties":{"LogDriver":{"description":"The log driver to use for tasks created in the orchestrator if\nunspecified by a service.\n\nUpdating this value only affects new tasks. Existing tasks continue\nto use their previously configured log driver until recreated.\n","type":"object","properties":{"Name":{"description":"The log driver to use as a default for new tasks.\n","type":"string","example":"json-file"},"Options":{"description":"Driver-specific options for the selectd log driver, specified\nas key/value pairs.\n","type":"object","additionalProperties":{"type":"string"},"example":{"max-file":"10","max-size":"100m"}}}}}}}},"SystemInfo":{"type":"object","properties":{"Architecture":{"description":"Hardware architecture of the host, as returned by the Go runtime\n(`GOARCH`).\n\nA full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment).\n","type":"string","example":"x86_64"},"BridgeNfIp6tables":{"description":"Indicates if `bridge-nf-call-ip6tables` is available on the host.","type":"boolean","example":true},"BridgeNfIptables":{"description":"Indicates if `bridge-nf-call-iptables` is available on the 
host.","type":"boolean","example":true},"CPUSet":{"description":"Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host.\n\nSee [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt)\n","type":"boolean","example":true},"CPUShares":{"description":"Indicates if CPU Shares limiting is supported by the host.","type":"boolean","example":true},"CgroupDriver":{"description":"The driver to use for managing cgroups.\n","type":"string","default":"cgroupfs","enum":["cgroupfs","systemd"],"example":"cgroupfs"},"ClusterAdvertise":{"description":"The network endpoint that the Engine advertises for the purpose of\nnode discovery. ClusterAdvertise is a `host:port` combination on which\nthe daemon is reachable by other hosts.\n\n


    \n\n> **Note**: This field is only propagated when using standalone Swarm\n> mode, and overlay networking using an external k/v store. Overlay\n> networks with Swarm mode enabled use the built-in raft store, and\n> this field will be empty.\n","type":"string","example":"node5.corp.example.com:8000"},"ClusterStore":{"description":"URL of the distributed storage backend.\n\n\nThe storage backend is used for multihost networking (to store\nnetwork and endpoint information) and by the node discovery mechanism.\n\n


    \n\n> **Note**: This field is only propagated when using standalone Swarm\n> mode, and overlay networking using an external k/v store. Overlay\n> networks with Swarm mode enabled use the built-in raft store, and\n> this field will be empty.\n","type":"string","example":"consul://consul.corp.example.com:8600/some/path"},"ContainerdCommit":{"$ref":"#/definitions/Commit"},"Containers":{"description":"Total number of containers on the host.","type":"integer","example":14},"ContainersPaused":{"description":"Number of containers with status `\"paused\"`.\n","type":"integer","example":1},"ContainersRunning":{"description":"Number of containers with status `\"running\"`.\n","type":"integer","example":3},"ContainersStopped":{"description":"Number of containers with status `\"stopped\"`.\n","type":"integer","example":10},"CpuCfsPeriod":{"description":"Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host.","type":"boolean","example":true},"CpuCfsQuota":{"description":"Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host.","type":"boolean","example":true},"Debug":{"description":"Indicates if the daemon is running in debug-mode / with debug-level logging enabled.","type":"boolean","example":true},"DefaultRuntime":{"description":"Name of the default OCI runtime that is used when starting containers.\n\nThe default can be overridden per-container at create time.\n","type":"string","default":"runc","example":"runc"},"DockerRootDir":{"description":"Root directory of persistent Docker state.\n\nDefaults to `/var/lib/docker` on Linux, and `C:\\ProgramData\\docker`\non Windows.\n","type":"string","example":"/var/lib/docker"},"Driver":{"description":"Name of the storage driver in use.","type":"string","example":"overlay2"},"DriverStatus":{"description":"Information specific to the storage driver, provided as\n\"label\" / \"value\" pairs.\n\nThis information is provided by the storage driver, and formatted\nin a way consistent 
with the output of `docker info` on the command\nline.\n\n


    \n\n> **Note**: The information returned in this field, including the\n> formatting of values and labels, should not be considered stable,\n> and may change without notice.\n","type":"array","items":{"type":"array","items":{"type":"string"}},"example":[["Backing Filesystem","extfs"],["Supports d_type","true"],["Native Overlay Diff","true"]]},"ExperimentalBuild":{"description":"Indicates if experimental features are enabled on the daemon.\n","type":"boolean","example":true},"GenericResources":{"$ref":"#/definitions/GenericResources"},"HttpProxy":{"description":"HTTP-proxy configured for the daemon. This value is obtained from the\n[`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.\n\nContainers do not automatically inherit this configuration.\n","type":"string","example":"http://user:pass@proxy.corp.example.com:8080"},"HttpsProxy":{"description":"HTTPS-proxy configured for the daemon. This value is obtained from the\n[`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.\n\nContainers do not automatically inherit this configuration.\n","type":"string","example":"https://user:pass@proxy.corp.example.com:4443"},"ID":{"description":"Unique identifier of the daemon.\n\n


    \n\n> **Note**: The format of the ID itself is not part of the API, and\n> should not be considered stable.\n","type":"string","example":"7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS"},"IPv4Forwarding":{"description":"Indicates IPv4 forwarding is enabled.","type":"boolean","example":true},"Images":{"description":"Total number of images on the host.\n\nBoth _tagged_ and _untagged_ (dangling) images are counted.\n","type":"integer","example":508},"IndexServerAddress":{"description":"Address / URL of the index server that is used for image search,\nand as a default for user authentication for Docker Hub and Docker Cloud.\n","type":"string","default":"https://index.docker.io/v1/","example":"https://index.docker.io/v1/"},"InitBinary":{"description":"Name and, optional, path of the `docker-init` binary.\n\nIf the path is omitted, the daemon searches the host's `$PATH` for the\nbinary and uses the first result.\n","type":"string","example":"docker-init"},"InitCommit":{"$ref":"#/definitions/Commit"},"Isolation":{"description":"Represents the isolation technology to use as a default for containers.\nThe supported values are platform-specific.\n\nIf no isolation value is specified on daemon start, on Windows client,\nthe default is `hyperv`, and on Windows server, the default is `process`.\n\nThis option is currently not used on other platforms.\n","type":"string","default":"default","enum":["default","hyperv","process"]},"KernelMemory":{"description":"Indicates if the host has kernel memory limit support enabled.","type":"boolean","example":true},"KernelVersion":{"description":"Kernel version of the host.\n\nOn Linux, this information obtained from `uname`. 
On Windows this\ninformation is queried from the HKEY_LOCAL_MACHINE\\\\SOFTWARE\\\\Microsoft\\\\Windows NT\\\\CurrentVersion\\\\\nregistry value, for example _\"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)\"_.\n","type":"string","example":"4.9.38-moby"},"Labels":{"description":"User-defined labels (key/value metadata) as set on the daemon.\n\n


    \n\n> **Note**: When part of a Swarm, nodes can both have _daemon_ labels,\n> set through the daemon configuration, and _node_ labels, set from a\n> manager node in the Swarm. Node labels are not included in this\n> field. Node labels can be retrieved using the `/nodes/(id)` endpoint\n> on a manager node in the Swarm.\n","type":"array","items":{"type":"string"},"example":["storage=ssd","production"]},"LiveRestoreEnabled":{"description":"Indicates if live restore is enabled.\n\nIf enabled, containers are kept running when the daemon is shutdown\nor upon daemon start if running containers are detected.\n","type":"boolean","default":false,"example":false},"LoggingDriver":{"description":"The logging driver to use as a default for new containers.\n","type":"string"},"MemTotal":{"description":"Total amount of physical memory available on the host, in kilobytes (kB).\n","type":"integer","format":"int64","example":2095882240},"MemoryLimit":{"description":"Indicates if the host has memory limit support enabled.","type":"boolean","example":true},"NCPU":{"description":"The number of logical CPUs usable by the daemon.\n\nThe number of available CPUs is checked by querying the operating\nsystem when the daemon starts. 
Changes to operating system CPU\nallocation after the daemon is started are not reflected.\n","type":"integer","example":4},"NEventsListener":{"description":"Number of event listeners subscribed.","type":"integer","example":30},"NFd":{"description":"The total number of file Descriptors in use by the daemon process.\n\nThis information is only returned if debug-mode is enabled.\n","type":"integer","example":64},"NGoroutines":{"description":"The number of goroutines that currently exist.\n\nThis information is only returned if debug-mode is enabled.\n","type":"integer","example":174},"Name":{"description":"Hostname of the host.","type":"string","example":"node5.corp.example.com"},"NoProxy":{"description":"Comma-separated list of domain extensions for which no proxy should be\nused. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html)\nenvironment variable.\n\nContainers do not automatically inherit this configuration.\n","type":"string","example":"*.local, 169.254/16"},"OSType":{"description":"Generic type of the operating system of the host, as returned by the\nGo runtime (`GOOS`).\n\nCurrently returned values are \"linux\" and \"windows\". 
A full list of\npossible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment).\n","type":"string","example":"linux"},"OomKillDisable":{"description":"Indicates if OOM killer disable is supported on the host.","type":"boolean"},"OperatingSystem":{"description":"Name of the host's operating system, for example: \"Ubuntu 16.04.2 LTS\"\nor \"Windows Server 2016 Datacenter\"\n","type":"string","example":"Alpine Linux v3.5"},"Plugins":{"$ref":"#/definitions/PluginsInfo"},"ProductLicense":{"description":"Reports a summary of the product license on the daemon.\n\nIf a commercial license has been applied to the daemon, information\nsuch as number of nodes, and expiration are included.\n","type":"string","example":"Community Engine"},"RegistryConfig":{"$ref":"#/definitions/RegistryServiceConfig"},"RuncCommit":{"$ref":"#/definitions/Commit"},"Runtimes":{"description":"List of [OCI compliant](https://github.com/opencontainers/runtime-spec)\nruntimes configured on the daemon. Keys hold the \"name\" used to\nreference the runtime.\n\nThe Docker daemon relies on an OCI compliant runtime (invoked via the\n`containerd` daemon) as its interface to the Linux kernel namespaces,\ncgroups, and SELinux.\n\nThe default runtime is `runc`, and automatically configured. 
Additional\nruntimes can be configured by the user and will be listed here.\n","type":"object","default":{"runc":{"path":"docker-runc"}},"additionalProperties":{"$ref":"#/definitions/Runtime"},"example":{"custom":{"path":"/usr/local/bin/my-oci-runtime","runtimeArgs":["--debug","--systemd-cgroup=false"]},"runc":{"path":"docker-runc"},"runc-master":{"path":"/go/bin/runc"}}},"SecurityOptions":{"description":"List of security features that are enabled on the daemon, such as\napparmor, seccomp, SELinux, and user-namespaces (userns).\n\nAdditional configuration options for each security feature may\nbe present, and are included as a comma-separated list of key/value\npairs.\n","type":"array","items":{"type":"string"},"example":["name=apparmor","name=seccomp,profile=default","name=selinux","name=userns"]},"ServerVersion":{"description":"Version string of the daemon.\n\n> **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/)\n> returns the Swarm version instead of the daemon version, for example\n> `swarm/1.2.8`.\n","type":"string","example":"17.06.0-ce"},"SwapLimit":{"description":"Indicates if the host has memory swap limit support enabled.","type":"boolean","example":true},"Swarm":{"$ref":"#/definitions/SwarmInfo"},"SystemStatus":{"description":"Status information about this node (standalone Swarm API).\n\n


    \n\n> **Note**: The information returned in this field is only propagated\n> by the Swarm standalone API, and is empty (`null`) when using\n> built-in swarm mode.\n","type":"array","items":{"type":"array","items":{"type":"string"}},"example":[["Role","primary"],["State","Healthy"],["Strategy","spread"],["Filters","health, port, containerslots, dependency, affinity, constraint, whitelist"],["Nodes","2"],[" swarm-agent-00","192.168.99.102:2376"],[" └ ID","5CT6:FBGO:RVGO:CZL4:PB2K:WCYN:2JSV:KSHH:GGFW:QOPG:6J5Q:IOZ2|192.168.99.102:2376"],[" └ Status","Healthy"],[" └ Containers","1 (1 Running, 0 Paused, 0 Stopped)"],[" └ Reserved CPUs","0 / 1"],[" └ Reserved Memory","0 B / 1.021 GiB"],[" └ Labels","kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"],[" └ UpdatedAt","2017-08-09T10:03:46Z"],[" └ ServerVersion","17.06.0-ce"],[" swarm-manager","192.168.99.101:2376"],[" └ ID","TAMD:7LL3:SEF7:LW2W:4Q2X:WVFH:RTXX:JSYS:XY2P:JEHL:ZMJK:JGIW|192.168.99.101:2376"],[" └ Status","Healthy"],[" └ Containers","2 (2 Running, 0 Paused, 0 Stopped)"],[" └ Reserved CPUs","0 / 1"],[" └ Reserved Memory","0 B / 1.021 GiB"],[" └ Labels","kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"],[" └ UpdatedAt","2017-08-09T10:04:11Z"],[" └ ServerVersion","17.06.0-ce"]]},"SystemTime":{"description":"Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)\nformat with nano-seconds.\n","type":"string","example":"2017-08-08T20:28:29.06202363Z"},"Warnings":{"description":"List of warnings / informational messages about missing features, or\nissues related to the daemon configuration.\n\nThese messages can be printed by the client as information to the user.\n","type":"array","items":{"type":"string"},"example":["WARNING: 
No memory limit support","WARNING: bridge-nf-call-iptables is disabled","WARNING: bridge-nf-call-ip6tables is disabled"]}}},"TLSInfo":{"description":"Information about the issuer of leaf TLS certificates and the trusted root CA certificate","type":"object","properties":{"CertIssuerPublicKey":{"description":"The base64-url-safe-encoded raw public key bytes of the issuer","type":"string"},"CertIssuerSubject":{"description":"The base64-url-safe-encoded raw subject bytes of the issuer","type":"string"},"TrustRoot":{"description":"The root CA certificate(s) that are used to validate leaf TLS certificates","type":"string"}},"example":{"CertIssuerPublicKey":"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==","CertIssuerSubject":"MBMxETAPBgNVBAMTCHN3YXJtLWNh","TrustRoot":"-----BEGIN CERTIFICATE-----\nMIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw\nEzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0\nMzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH\nA0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf\n3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO\nPQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz\npxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H\n-----END CERTIFICATE-----\n"}},"Task":{"type":"object","properties":{"AssignedGenericResources":{"$ref":"#/definitions/GenericResources"},"CreatedAt":{"type":"string","format":"dateTime"},"DesiredState":{"$ref":"#/definitions/TaskState"},"ID":{"description":"The ID of the task.","type":"string"},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"Name":{"description":"Name of the task.","type":"string"},"NodeID":{"description":"The ID of the node that this task is on.","type":"string"},"ServiceID":{"description":"The ID of the service this task 
is part of.","type":"string"},"Slot":{"type":"integer"},"Spec":{"$ref":"#/definitions/TaskSpec"},"Status":{"type":"object","properties":{"ContainerStatus":{"type":"object","properties":{"ContainerID":{"type":"string"},"ExitCode":{"type":"integer"},"PID":{"type":"integer"}}},"Err":{"type":"string"},"Message":{"type":"string"},"State":{"$ref":"#/definitions/TaskState"},"Timestamp":{"type":"string","format":"dateTime"}}},"UpdatedAt":{"type":"string","format":"dateTime"},"Version":{"$ref":"#/definitions/ObjectVersion"}},"example":{"AssignedGenericResources":[{"DiscreteResourceSpec":{"Kind":"SSD","Value":3}},{"NamedResourceSpec":{"Kind":"GPU","Value":"UUID1"}},{"NamedResourceSpec":{"Kind":"GPU","Value":"UUID2"}}],"CreatedAt":"2016-06-07T21:07:31.171892745Z","DesiredState":"running","ID":"0kzzo1i0y4jz6027t0k7aezc7","NetworksAttachments":[{"Addresses":["10.255.0.10/16"],"Network":{"CreatedAt":"2016-06-07T20:31:11.912919752Z","DriverState":{"Name":"overlay","Options":{"com.docker.network.driver.overlay.vxlanid_list":"256"}},"ID":"4qvuz4ko70xaltuqbt8956gd1","IPAMOptions":{"Configs":[{"Gateway":"10.255.0.1","Subnet":"10.255.0.0/16"}],"Driver":{"Name":"default"}},"Spec":{"DriverConfiguration":{},"IPAMOptions":{"Configs":[{"Gateway":"10.255.0.1","Subnet":"10.255.0.0/16"}],"Driver":{}},"Labels":{"com.docker.swarm.internal":"true"},"Name":"ingress"},"UpdatedAt":"2016-06-07T21:07:29.955277358Z","Version":{"Index":18}}}],"NodeID":"60gvrl6tm78dmak4yl7srz94v","ServiceID":"9mnpnzenvg8p8tdbtq4wvbkcz","Slot":1,"Spec":{"ContainerSpec":{"Image":"redis"},"Placement":{},"Resources":{"Limits":{},"Reservations":{}},"RestartPolicy":{"Condition":"any","MaxAttempts":0}},"Status":{"ContainerStatus":{"ContainerID":"e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035","PID":677},"Message":"started","State":"running","Timestamp":"2016-06-07T21:07:31.290032978Z"},"UpdatedAt":"2016-06-07T21:07:31.376370513Z","Version":{"Index":71}}},"TaskSpec":{"description":"User modifiable task 
configuration.","type":"object","properties":{"ContainerSpec":{"description":"Container spec for the service.\n\n


    \n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n","type":"object","properties":{"Args":{"description":"Arguments to the command.","type":"array","items":{"type":"string"}},"Command":{"description":"The command to be run in the image.","type":"array","items":{"type":"string"}},"Configs":{"description":"Configs contains references to zero or more configs that will be exposed to the service.","type":"array","items":{"type":"object","properties":{"ConfigID":{"description":"ConfigID represents the ID of the specific config that we're referencing.","type":"string"},"ConfigName":{"description":"ConfigName is the name of the config that this references, but this is just provided for\nlookup/display purposes. The config in the reference will be identified by its ID.\n","type":"string"},"File":{"description":"File represents a specific target that is backed by a file.","type":"object","properties":{"GID":{"description":"GID represents the file GID.","type":"string"},"Mode":{"description":"Mode represents the FileMode of the file.","type":"integer","format":"uint32"},"Name":{"description":"Name represents the final filename in the filesystem.","type":"string"},"UID":{"description":"UID represents the file UID.","type":"string"}}}}}},"DNSConfig":{"description":"Specification for DNS related configurations in resolver configuration file (`resolv.conf`).","type":"object","properties":{"Nameservers":{"description":"The IP addresses of the name servers.","type":"array","items":{"type":"string"}},"Options":{"description":"A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.).","type":"array","items":{"type":"string"}},"Search":{"description":"A search list for host-name lookup.","type":"array","items":{"type":"string"}}}},"Dir":{"description":"The 
working directory for commands to run in.","type":"string"},"Env":{"description":"A list of environment variables in the form `VAR=value`.","type":"array","items":{"type":"string"}},"Groups":{"description":"A list of additional groups that the container process will run as.","type":"array","items":{"type":"string"}},"HealthCheck":{"$ref":"#/definitions/HealthConfig"},"Hostname":{"description":"The hostname to use for the container, as a valid RFC 1123 hostname.","type":"string"},"Hosts":{"description":"A list of hostname/IP mappings to add to the container's `hosts`\nfile. The format of extra hosts is specified in the\n[hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html)\nman page:\n\n IP_address canonical_hostname [aliases...]\n","type":"array","items":{"type":"string"}},"Image":{"description":"The image name to use for the container","type":"string"},"Init":{"description":"Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used.","type":"boolean","x-nullable":true},"Isolation":{"description":"Isolation technology of the containers running the service. (Windows only)","type":"string","enum":["default","process","hyperv"]},"Labels":{"description":"User-defined key/value data.","type":"object","additionalProperties":{"type":"string"}},"Mounts":{"description":"Specification for mounts to be added to containers created as part of the service.","type":"array","items":{"$ref":"#/definitions/Mount"}},"OpenStdin":{"description":"Open `stdin`","type":"boolean"},"Privileges":{"description":"Security options for the container","type":"object","properties":{"CredentialSpec":{"description":"CredentialSpec for managed service account (Windows only)","type":"object","properties":{"File":{"description":"Load credential spec from this file. 
The file is read by the daemon, and must be present in the\n`CredentialSpecs` subdirectory in the docker data directory, which defaults to\n`C:\\ProgramData\\Docker\\` on Windows.\n\nFor example, specifying `spec.json` loads `C:\\ProgramData\\Docker\\CredentialSpecs\\spec.json`.\n\n


    \n\n> **Note**: `CredentialSpec.File` and `CredentialSpec.Registry` are mutually exclusive.\n","type":"string"},"Registry":{"description":"Load credential spec from this value in the Windows registry. The specified registry value must be\nlocated in:\n\n`HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Virtualization\\Containers\\CredentialSpecs`\n\n


    \n\n\n> **Note**: `CredentialSpec.File` and `CredentialSpec.Registry` are mutually exclusive.\n","type":"string"}}},"SELinuxContext":{"description":"SELinux labels of the container","type":"object","properties":{"Disable":{"description":"Disable SELinux","type":"boolean"},"Level":{"description":"SELinux level label","type":"string"},"Role":{"description":"SELinux role label","type":"string"},"Type":{"description":"SELinux type label","type":"string"},"User":{"description":"SELinux user label","type":"string"}}}}},"ReadOnly":{"description":"Mount the container's root filesystem as read only.","type":"boolean"},"Secrets":{"description":"Secrets contains references to zero or more secrets that will be exposed to the service.","type":"array","items":{"type":"object","properties":{"File":{"description":"File represents a specific target that is backed by a file.","type":"object","properties":{"GID":{"description":"GID represents the file GID.","type":"string"},"Mode":{"description":"Mode represents the FileMode of the file.","type":"integer","format":"uint32"},"Name":{"description":"Name represents the final filename in the filesystem.","type":"string"},"UID":{"description":"UID represents the file UID.","type":"string"}}},"SecretID":{"description":"SecretID represents the ID of the specific secret that we're referencing.","type":"string"},"SecretName":{"description":"SecretName is the name of the secret that this references, but this is just provided for\nlookup/display purposes. 
The secret in the reference will be identified by its ID.\n","type":"string"}}}},"StopGracePeriod":{"description":"Amount of time to wait for the container to terminate before forcefully killing it.","type":"integer","format":"int64"},"StopSignal":{"description":"Signal to stop the container.","type":"string"},"TTY":{"description":"Whether a pseudo-TTY should be allocated.","type":"boolean"},"User":{"description":"The user inside the container.","type":"string"}}},"ForceUpdate":{"description":"A counter that triggers an update even if no relevant parameters have been changed.","type":"integer"},"LogDriver":{"description":"Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified.","type":"object","properties":{"Name":{"type":"string"},"Options":{"type":"object","additionalProperties":{"type":"string"}}}},"NetworkAttachmentSpec":{"description":"Read-only spec type for non-swarm containers attached to swarm overlay\nnetworks.\n\n


    \n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n","type":"object","properties":{"ContainerID":{"description":"ID of the container represented by this task","type":"string"}}},"Networks":{"type":"array","items":{"type":"object","properties":{"Aliases":{"type":"array","items":{"type":"string"}},"Target":{"type":"string"}}}},"Placement":{"type":"object","properties":{"Constraints":{"description":"An array of constraints.","type":"array","items":{"type":"string"},"example":["node.hostname!=node3.corp.example.com","node.role!=manager","node.labels.type==production"]},"Platforms":{"description":"Platforms stores all the platforms that the service's image can\nrun on. This field is used in the platform filter for scheduling.\nIf empty, then the platform filter is off, meaning there are no\nscheduling restrictions.\n","type":"array","items":{"$ref":"#/definitions/Platform"}},"Preferences":{"description":"Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence.","type":"array","items":{"type":"object","properties":{"Spread":{"type":"object","properties":{"SpreadDescriptor":{"description":"label descriptor, such as engine.labels.az","type":"string"}}}}},"example":[{"Spread":{"SpreadDescriptor":"node.labels.datacenter"}},{"Spread":{"SpreadDescriptor":"node.labels.rack"}}]}}},"PluginSpec":{"description":"Plugin spec for the service. *(Experimental release only.)*\n\n


    \n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n","type":"object","properties":{"Disabled":{"description":"Disable the plugin once scheduled.","type":"boolean"},"Name":{"description":"The name or 'alias' to use for the plugin.","type":"string"},"PluginPrivilege":{"type":"array","items":{"description":"Describes a permission accepted by the user upon installing the plugin.","type":"object","properties":{"Description":{"type":"string"},"Name":{"type":"string"},"Value":{"type":"array","items":{"type":"string"}}}}},"Remote":{"description":"The plugin image reference to use.","type":"string"}}},"Resources":{"description":"Resource requirements which apply to each individual container created as part of the service.","type":"object","properties":{"Limits":{"description":"Define resources limits.","$ref":"#/definitions/ResourceObject"},"Reservation":{"description":"Define resources reservation.","$ref":"#/definitions/ResourceObject"}}},"RestartPolicy":{"description":"Specification for the restart policy which applies to containers created as part of this service.","type":"object","properties":{"Condition":{"description":"Condition for restart.","type":"string","enum":["none","on-failure","any"]},"Delay":{"description":"Delay between restart attempts.","type":"integer","format":"int64"},"MaxAttempts":{"description":"Maximum attempts to restart a given container before giving up (default value is 0, which is ignored).","type":"integer","format":"int64","default":0},"Window":{"description":"Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded).","type":"integer","format":"int64","default":0}}},"Runtime":{"description":"Runtime is the type of runtime specified for the task 
executor.","type":"string"}}},"TaskState":{"type":"string","enum":["new","allocated","pending","assigned","accepted","preparing","ready","starting","running","complete","shutdown","failed","rejected","remove","orphaned"]},"ThrottleDevice":{"type":"object","properties":{"Path":{"description":"Device path","type":"string"},"Rate":{"description":"Rate","type":"integer","format":"int64","minimum":0}}},"Volume":{"type":"object","required":["Name","Driver","Mountpoint","Labels","Scope","Options"],"properties":{"CreatedAt":{"description":"Date/Time the volume was created.","type":"string","format":"dateTime"},"Driver":{"description":"Name of the volume driver used by the volume.","type":"string","x-nullable":false},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"},"x-nullable":false},"Mountpoint":{"description":"Mount path of the volume on the host.","type":"string","x-nullable":false},"Name":{"description":"Name of the volume.","type":"string","x-nullable":false},"Options":{"description":"The driver specific options used when creating the volume.","type":"object","additionalProperties":{"type":"string"}},"Scope":{"description":"The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level.","type":"string","default":"local","enum":["local","global"],"x-nullable":false},"Status":{"description":"Low-level details about the volume, provided by the volume driver.\nDetails are returned as a map with key/value pairs:\n`{\"key\":\"value\",\"key2\":\"value2\"}`.\n\nThe `Status` field is optional, and is omitted if the volume driver\ndoes not support this feature.\n","type":"object","additionalProperties":{"type":"object"}},"UsageData":{"description":"Usage details about the volume. 
This information is used by the\n`GET /system/df` endpoint, and omitted in other endpoints.\n","type":"object","required":["Size","RefCount"],"properties":{"RefCount":{"description":"The number of containers referencing this volume. This field\nis set to `-1` if the reference-count is not available.\n","type":"integer","default":-1,"x-nullable":false},"Size":{"description":"Amount of disk space used by the volume (in bytes). This information\nis only available for volumes created with the `\"local\"` volume\ndriver. For volumes created with other volume drivers, this field\nis set to `-1` (\"not available\")\n","type":"integer","default":-1,"x-nullable":false}},"x-nullable":true}},"example":{"CreatedAt":"2016-06-07T20:31:11.853781916Z","Driver":"custom","Labels":{"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"},"Mountpoint":"/var/lib/docker/volumes/tardis","Name":"tardis","Scope":"local","Status":{"hello":"world"}}},"api.putConfigOrLicenseResponse":{"id":"api.putConfigOrLicenseResponse","required":["message"],"properties":{"message":{"type":"string"}}},"auth.Credentials":{"id":"auth.Credentials","properties":{"password":{"type":"string"},"token":{"type":"string"},"username":{"type":"string"}}},"auth.LoginResponse":{"id":"auth.LoginResponse","properties":{"auth_token":{"type":"string"}}},"authz.Collection":{"id":"authz.Collection","required":["name","path","id","parent_ids","label_constraints","legacylabelkey","legacylabelvalue","created_at","updated_at"],"properties":{"created_at":{"description":"When the collection was created","type":"string","format":"date-time"},"id":{"description":"A unique ID for this collection","type":"string"},"label_constraints":{"description":"A set of label constraints to be applied to any service or container created in this collection","type":"array","items":{"$ref":"#/definitions/authz.LabelConstraint"}},"legacylabelkey":{"description":"The key of the legacy authorization label for this 
collection","type":"string"},"legacylabelvalue":{"description":"The value of the legacy authorization label for this collection","type":"string"},"name":{"description":"The name of the collection","type":"string"},"parent_ids":{"description":"A list of collection IDs of parent collections","type":"array","items":{"type":"string"}},"path":{"description":"The full path of the collection","type":"string"},"updated_at":{"description":"When the collection was updated","type":"string","format":"date-time"}}},"authz.CollectionCreate":{"id":"authz.CollectionCreate","required":["name","parent_id","label_constraints","legacy_label_key","legacy_label_value"],"properties":{"label_constraints":{"type":"array","items":{"$ref":"#/definitions/authz.LabelConstraint"}},"legacy_label_key":{"type":"string"},"legacy_label_value":{"type":"string"},"name":{"type":"string"},"parent_id":{"type":"string"}}},"authz.CollectionCreateResponse":{"id":"authz.CollectionCreateResponse","required":["id"],"properties":{"id":{"type":"string"}}},"authz.CollectionID":{"id":"authz.CollectionID","required":["id"],"properties":{"id":{"type":"string"}}},"authz.CollectionUpdate":{"id":"authz.CollectionUpdate","required":["label_constraints"],"properties":{"label_constraints":{"type":"array","items":{"$ref":"#/definitions/authz.LabelConstraint"}}}},"authz.LabelConstraint":{"id":"authz.LabelConstraint","required":["type","label_key","label_value","equality"],"properties":{"equality":{"type":"boolean"},"label_key":{"type":"string"},"label_value":{"type":"string"},"type":{"type":"string"}}},"authz.RoleCreateResponse":{"id":"authz.RoleCreateResponse","required":["id"],"properties":{"id":{"description":"The ID of the newly created 
role","type":"string"}}},"config.AuditLogConfiguration":{"id":"config.AuditLogConfiguration","required":["level","support_dump_include_audit_logs"],"properties":{"level":{"type":"string"},"support_dump_include_audit_logs":{"type":"boolean"}}},"config.AuthConfiguration":{"id":"config.AuthConfiguration","required":["sessions","saml","backend"],"properties":{"backend":{"description":"The name of the auth backend to use","type":"string","enum":["managed","ldap"]},"defaultNewUserRole":{"type":"string"},"saml":{"$ref":"#/definitions/forms.SAMLSettings"},"samlEnabled":{"description":"Whether SAML SSO is enabled in the system","type":"boolean"},"samlLoginText":{"description":"Customized SAML Login Text","type":"string"},"sessions":{"$ref":"#/definitions/forms.SessionsConfig"}}},"config.HTTPHeader":{"id":"config.HTTPHeader","required":["name","value"],"properties":{"name":{"type":"string"},"value":{"type":"string"}}},"config.LicenseConfiguration":{"id":"config.LicenseConfiguration","required":["auto_refresh","license_server_url","license_server_public_key"],"properties":{"auto_refresh":{"type":"boolean"},"license_server_public_key":{"type":"string"},"license_server_url":{"type":"string"}}},"config.LogConfiguration":{"id":"config.LogConfiguration","required":["level"],"properties":{"level":{"type":"string"}}},"config.SchedulingConfiguration":{"id":"config.SchedulingConfiguration","required":["enable_admin_ucp_scheduling","default_node_orchestrator"],"properties":{"default_node_orchestrator":{"type":"string"},"enable_admin_ucp_scheduling":{"type":"boolean"}}},"config.TrackingConfiguration":{"id":"config.TrackingConfiguration","required":["disable_usageinfo","disable_tracking","anonymize_tracking","ClusterLabel"],"properties":{"ClusterLabel":{"type":"string"},"anonymize_tracking":{"type":"boolean"},"disable_tracking":{"type":"boolean"},"disable_usageinfo":{"type":"boolean"}}},"config.TrustConfiguration":{"id":"config.TrustConfiguration","required":["require_content_trust","requ
ire_signature_from"],"properties":{"require_content_trust":{"type":"boolean"},"require_signature_from":{"type":"array","items":{"type":"string"}}}},"config.TrustedRegistryConfig":{"id":"config.TrustedRegistryConfig","required":["hostAddress","serviceID","caBundle","batchScanningDataEnabled"],"properties":{"batchScanningDataEnabled":{"type":"boolean"},"caBundle":{"type":"string"},"hostAddress":{"type":"string"},"serviceID":{"type":"string"}}},"config.UCPConfiguration":{"id":"config.UCPConfiguration","required":["auth","Registries","SchedulingConfiguration","TrackingConfiguration","TrustConfiguration","LogConfiguration","AuditLogConfiguration","LicenseConfiguration","customAPIServerHeaders","ClusterConfig"],"properties":{"AuditLogConfiguration":{"$ref":"#/definitions/config.AuditLogConfiguration"},"ClusterConfig":{"$ref":"#/definitions/types.ClusterConfig"},"LicenseConfiguration":{"$ref":"#/definitions/config.LicenseConfiguration"},"LogConfiguration":{"$ref":"#/definitions/config.LogConfiguration"},"Registries":{"type":"array","items":{"$ref":"#/definitions/config.TrustedRegistryConfig"}},"SchedulingConfiguration":{"$ref":"#/definitions/config.SchedulingConfiguration"},"TrackingConfiguration":{"$ref":"#/definitions/config.TrackingConfiguration"},"TrustConfiguration":{"$ref":"#/definitions/config.TrustConfiguration"},"auth":{"$ref":"#/definitions/config.AuthConfiguration"},"customAPIServerHeaders":{"type":"array","items":{"$ref":"#/definitions/config.HTTPHeader"}}}},"errors.APIError":{"id":"errors.APIError","required":["code","message"],"properties":{"code":{"type":"string"},"detail":{"$ref":"#/definitions/errors.APIError.detail"},"message":{"type":"string"}}},"errors.APIError.detail":{"id":"errors.APIError.detail"},"forms.BulkOperation":{"id":"forms.BulkOperation","required":["op"],"properties":{"op":{"description":"The operation to perform","type":"string"},"ref":{"description":"An identifier referencing the object on which to perform the operation, if 
applicable","type":"string"},"value":{"description":"The form value to submit for the operation, if applicable","type":"string"}}},"forms.BulkOperations":{"id":"forms.BulkOperations","required":["operations"],"properties":{"operations":{"type":"array","items":{"$ref":"#/definitions/forms.BulkOperation"}}}},"forms.Certificate":{"id":"forms.Certificate","required":["label","cert"],"properties":{"cert":{"description":"Encoded PEM for the cert","type":"string"},"label":{"description":"Label for the certificate","type":"string"}}},"forms.ChangePassword":{"id":"forms.ChangePassword","required":["oldPassword","newPassword"],"properties":{"newPassword":{"description":"User's new password","type":"string"},"oldPassword":{"description":"User's current password. Required if the client is changing their own password. May be omitted if an admin is changing another user's password","type":"string"}}},"forms.CreateAccount":{"id":"forms.CreateAccount","required":["name"],"properties":{"fullName":{"description":"Full name of account","type":"string"},"isActive":{"description":"Whether the user is active and can login (users only)","type":"boolean"},"isAdmin":{"description":"Whether the user is an admin (users only)","type":"boolean"},"isOrg":{"description":"Whether the account is an organization","type":"boolean"},"name":{"description":"Name of account","type":"string"},"password":{"description":"Password for the user (users only)","type":"string"},"searchLDAP":{"description":"Whether the user should be found by searching against the currently configured LDAP servers. 
If true, the password field may be omitted and the discovered full name of the user will be used if one is not specified in this form (users only)","type":"boolean"}}},"forms.CreateAccountPublicKey":{"id":"forms.CreateAccountPublicKey","required":["publicKey"],"properties":{"certificates":{"description":"certificates for the public key","type":"array","items":{"$ref":"#/definitions/forms.Certificate"}},"label":{"description":"Label or description for the key","type":"string"},"publicKey":{"description":"Encoded PEM for the public key","type":"string"}}},"forms.CreateTeam":{"id":"forms.CreateTeam","required":["name"],"properties":{"description":{"description":"Description of the team","type":"string"},"name":{"description":"Name of the team","type":"string"}}},"forms.GroupLinkOpts":{"id":"forms.GroupLinkOpts","required":["enableLink","groupName"],"properties":{"enableLink":{"description":"Whether to enable SAML linking. If false, all other fields are ignored","type":"boolean"},"groupName":{"description":"The group name that is obtained from group attribute of the SAML assertion","type":"string"}}},"forms.MemberSyncOpts":{"id":"forms.MemberSyncOpts","required":["enableSync","selectGroupMembers","groupDN","groupMemberAttr","searchBaseDN","searchScopeSubtree","searchFilter"],"properties":{"enableSync":{"description":"Whether to enable LDAP syncing. If false, all other fields are ignored","type":"boolean"},"groupDN":{"description":"The distinguished name of the LDAP group. Required if selectGroupMembers is true, ignored otherwise","type":"string"},"groupMemberAttr":{"description":"The name of the LDAP group entry attribute which corresponds to distinguished names of members. Required if selectGroupMembers is true, ignored otherwise","type":"string"},"searchBaseDN":{"description":"The distinguished name of the element from which the LDAP server will search for users. 
Required if selectGroupMembers is false, ignored otherwise","type":"string"},"searchFilter":{"description":"The LDAP search filter used to select users if selectGroupMembers is false, may be left blank","type":"string"},"searchScopeSubtree":{"description":"Whether to search for users in the entire subtree of the base DN or to only search one level under the base DN (if false). Required if selectGroupMembers is false, ignored otherwise","type":"boolean"},"selectGroupMembers":{"description":"Whether to sync using a group DN and member attribute selection or to use a search filter (if false)","type":"boolean"}}},"forms.SAMLSettings":{"id":"forms.SAMLSettings","required":["idpMetadataURL","spHost","rootCerts","tlsSkipVerify"],"properties":{"idpMetadataURL":{"description":"The Identity Provider's Metadata URL'","type":"string"},"rootCerts":{"description":"Root Certs to access IdP Metadata","type":"string"},"spHost":{"description":"The Host address of the Service Provider","type":"string"},"tlsSkipVerify":{"description":"Option for TLSSkipVerify","type":"boolean"}}},"forms.SessionsConfig":{"id":"forms.SessionsConfig","required":["lifetimeMinutes","renewalThresholdMinutes","perUserLimit"],"properties":{"lifetimeMinutes":{"description":"Specifies the initial lifetime (in minutes) of a session from the moment it is generated, minimum is 10 minutes","type":"integer","format":"integer"},"perUserLimit":{"description":"Indicates the maximum number of sessions that any user can have active at any given time. If creating a new session would put a user over this limit then the least recently used session will be deleted. 
A value of zero disables limiting the number of sessions that users may have","type":"integer","format":"integer"},"renewalThresholdMinutes":{"description":"Indicates a period of time (in minutes) before the expiration of a session where, if used, a session will be extended by the current configured lifetime from then, a zero value disables session extension, maximum is 5 minutes less than initial session lifetime","type":"integer","format":"integer"}}},"forms.SetMembership":{"id":"forms.SetMembership","properties":{"isAdmin":{"description":"Whether the member should be an admin of the organization or team (default false), unchanged if nil or omitted","type":"boolean"}}},"forms.UpdateAccount":{"id":"forms.UpdateAccount","properties":{"fullName":{"description":"Full name of account, unchanged if null or omitted","type":"string"},"isActive":{"description":"Whether the user is active and can login (users only), unchanged if null or omitted","type":"boolean"},"isAdmin":{"description":"Whether the user is an admin (users only), unchanged if null or omitted","type":"boolean"}}},"forms.UpdateAccountPublicKey":{"id":"forms.UpdateAccountPublicKey","properties":{"certificates":{"description":"certificates for the public key","type":"array","items":{"$ref":"#/definitions/forms.Certificate"}},"label":{"description":"Label or description for the key","type":"string"}}},"forms.UpdateTeam":{"id":"forms.UpdateTeam","properties":{"description":{"description":"Description of the team, unchanged if nil or omitted","type":"string"},"name":{"description":"Name of the team, unchanged if nil or omitted","type":"string"}}},"responses.Account":{"id":"responses.Account","required":["name","id","fullName","isOrg"],"properties":{"fullName":{"description":"Full Name of the account","type":"string"},"id":{"description":"ID of the account","type":"string"},"isActive":{"description":"Whether the user is active and can login (users only)","type":"boolean"},"isAdmin":{"description":"Whether the 
user is a system admin (users only)","type":"boolean"},"isImported":{"description":"Whether the user was imported from an upstream identity provider","type":"boolean"},"isOrg":{"description":"Whether the account is an organization (or user)","type":"boolean"},"membersCount":{"description":"The number of members of the organization","type":"integer","format":"int32"},"name":{"description":"Name of the account","type":"string"},"teamsCount":{"description":"The number of teams in the organization","type":"integer","format":"int32"}}},"responses.AccountPublicKey":{"id":"responses.AccountPublicKey","required":["id","accountID","publicKey","label"],"properties":{"accountID":{"description":"the ID of the account","type":"string"},"certificates":{"description":"certificates for the public key","type":"array","items":{"$ref":"#/definitions/responses.Certificate"}},"id":{"description":"the hash of the public key's DER bytes","type":"string"},"label":{"description":"the label or description for the key","type":"string"},"publicKey":{"description":"the encoded PEM of the public key","type":"string"}}},"responses.AccountPublicKeys":{"id":"responses.AccountPublicKeys","required":["accountPublicKeys","nextPageStart"],"properties":{"accountPublicKeys":{"type":"array","items":{"$ref":"#/definitions/responses.AccountPublicKey"}},"nextPageStart":{"description":"The page start value which can be used to request the next batch of items (empty if there are no more items remaining)","type":"string"}}},"responses.Accounts":{"id":"responses.Accounts","required":["accounts","usersCount","orgsCount","nextPageStart","resourceCount"],"properties":{"accounts":{"type":"array","items":{"$ref":"#/definitions/responses.Account"}},"nextPageStart":{"description":"The page start value which can be used to request the next batch of items (empty if there are no more items remaining)","type":"string"},"orgsCount":{"description":"The total (unpaged) number of organizations (not considering any filters 
applied to this request)","type":"integer","format":"int32"},"resourceCount":{"description":"The total (unpaged) number of items (not considering any filters applied to the request)","type":"integer","format":"int32"},"usersCount":{"description":"The total (unpaged) number of users (not considering any filters applied to this request)","type":"integer","format":"int32"}}},"responses.BulkResult":{"id":"responses.BulkResult","required":["op","success"],"properties":{"error":{"description":"If not successful, the error encountered when performing the operation on this resource","$ref":"#/definitions/errors.APIError"},"op":{"description":"The operation which was perfomed","type":"string"},"ref":{"description":"The corresponding identifier in the bulk operation request","type":"string"},"success":{"description":"Whether the bulk operation was successful for this resource","type":"boolean"}}},"responses.BulkResults":{"id":"responses.BulkResults","required":["results"],"properties":{"results":{"description":"List of results for the bulk operation. 
The index of a bulk result corresponds to the index of the resource in the bulk request if result identifiers are omitted","type":"array","items":{"$ref":"#/definitions/responses.BulkResult"}}}},"responses.Certificate":{"id":"responses.Certificate","required":["label","cert"],"properties":{"cert":{"description":"Encoded PEM for the cert","type":"string"},"label":{"description":"Label for the certificate","type":"string"}}},"responses.Grant":{"id":"responses.Grant","required":["subjectID","objectID","roleID"],"properties":{"objectID":{"description":"ID of the object managed by the service","type":"string"},"roleID":{"description":"ID of the role managed by the service","type":"string"},"subjectID":{"description":"ID of the subject of this grant","type":"string"}}},"responses.GrantSubject":{"id":"responses.GrantSubject","required":["id","subject_type"],"properties":{"account":{"description":"The account associated with this subject, if any","$ref":"#/definitions/responses.Account"},"id":{"description":"ID of this subject","type":"string"},"subject_type":{"description":"The type of this subject (anonymous, authenticated, user, team, org)","type":"string"},"team":{"description":"The team associated with this subject, if any","$ref":"#/definitions/responses.Team"}}},"responses.Grants":{"id":"responses.Grants","required":["grants","subjects","nextPageStart","resourceCount"],"properties":{"grants":{"type":"array","items":{"$ref":"#/definitions/responses.Grant"}},"nextPageStart":{"description":"The page start value which can be used to request the next batch of items (empty if there are no more items remaining)","type":"string"},"resourceCount":{"description":"The total (unpaged) number of items (not considering any filters applied to the 
request)","type":"integer","format":"int32"},"subjects":{"type":"array","items":{"$ref":"#/definitions/responses.GrantSubject"}}}},"responses.GroupLinkOpts":{"id":"responses.GroupLinkOpts","required":["enableLink","groupName"],"properties":{"enableLink":{"description":"Whether to enable SAML linking. If false, all other fields are ignored","type":"boolean"},"groupName":{"description":"The group name that is obtained from group attribute of the SAML assertion","type":"string"}}},"responses.Member":{"id":"responses.Member","required":["member","isAdmin"],"properties":{"isAdmin":{"description":"Whether the member is an admin of the organization or team","type":"boolean"},"member":{"description":"The user which is a member of the organization or team","$ref":"#/definitions/responses.Account"}}},"responses.MemberOrg":{"id":"responses.MemberOrg","required":["org","isAdmin"],"properties":{"isAdmin":{"description":"Whether the user is an admin of the organization","type":"boolean"},"org":{"description":"The organization which the user is a member of","$ref":"#/definitions/responses.Account"}}},"responses.MemberOrgs":{"id":"responses.MemberOrgs","required":["memberOrgs","nextPageStart"],"properties":{"memberOrgs":{"type":"array","items":{"$ref":"#/definitions/responses.MemberOrg"}},"nextPageStart":{"description":"The page start value which can be used to request the next batch of items (empty if there are no more items remaining)","type":"string"}}},"responses.MemberSyncOpts":{"id":"responses.MemberSyncOpts","required":["enableSync","selectGroupMembers","groupDN","groupMemberAttr","searchBaseDN","searchScopeSubtree","searchFilter"],"properties":{"enableSync":{"description":"Whether to enable LDAP syncing. If false, all other fields are ignored","type":"boolean"},"groupDN":{"description":"The distinguished name of the LDAP group. 
Applicable only if selectGroupMembers is true, ignored otherwise","type":"string"},"groupMemberAttr":{"description":"The name of the LDAP group entry attribute which corresponds to distinguished names of members. Applicable only if selectGroupMembers is true, ignored otherwise","type":"string"},"searchBaseDN":{"description":"The distinguished name of the element from which the LDAP server will search for users. Applicable only if selectGroupMembers is false, ignored otherwise","type":"string"},"searchFilter":{"description":"The LDAP search filter used to select users if selectGroupMembers is false, may be left blank","type":"string"},"searchScopeSubtree":{"description":"Whether to search for users in the entire subtree of the base DN or to only search one level under the base DN (if false). Applicable only if selectGroupMembers is false, ignored otherwise","type":"boolean"},"selectGroupMembers":{"description":"Whether to sync using a group DN and member attribute selection or to use a search filter (if false)","type":"boolean"}}},"responses.MemberTeam":{"id":"responses.MemberTeam","required":["team","isAdmin"],"properties":{"isAdmin":{"description":"Whether the user is an admin of the team","type":"boolean"},"team":{"description":"The team which the user is a member of","$ref":"#/definitions/responses.Team"}}},"responses.MemberTeams":{"id":"responses.MemberTeams","required":["memberTeams","nextPageStart"],"properties":{"memberTeams":{"type":"array","items":{"$ref":"#/definitions/responses.MemberTeam"}},"nextPageStart":{"description":"The page start value which can be used to request the next batch of items (empty if there are no more items remaining)","type":"string"}}},"responses.Members":{"id":"responses.Members","required":["members","nextPageStart","resourceCount"],"properties":{"members":{"type":"array","items":{"$ref":"#/definitions/responses.Member"}},"nextPageStart":{"description":"The page start value which can be used to request the next batch of items 
(empty if there are no more items remaining)","type":"string"},"resourceCount":{"description":"The total (unpaged) number of items (not considering any filters applied to the request)","type":"integer","format":"int32"}}},"responses.Team":{"id":"responses.Team","required":["orgID","name","id","description","membersCount"],"properties":{"description":{"description":"Description of the team","type":"string"},"id":{"description":"ID of the team","type":"string"},"membersCount":{"description":"The number of members of the team","type":"integer","format":"int32"},"name":{"description":"Name of the team","type":"string"},"orgID":{"description":"ID of the organization to which this team belongs","type":"string"}}},"responses.Teams":{"id":"responses.Teams","required":["teams","nextPageStart","resourceCount"],"properties":{"nextPageStart":{"description":"The page start value which can be used to request the next batch of items (empty if there are no more items remaining)","type":"string"},"resourceCount":{"description":"The total (unpaged) number of items (not considering any filters applied to the 
request)","type":"integer","format":"int32"},"teams":{"type":"array","items":{"$ref":"#/definitions/responses.Team"}}}},"role.Role":{"id":"role.Role","required":["id","name","system_role","operations"],"properties":{"id":{"type":"string"},"name":{"type":"string"},"operations":{"type":"object"},"system_role":{"type":"boolean"}}},"types.ClusterConfig":{"id":"types.ClusterConfig","required":["ControllerPort","KubeAPIServerPort","ProxyKubeAPIServerPort","SwarmPort","SwarmStrategy","DNS","DNSOpt","DNSSearch","KVTimeout","KVSnapshotCount","ProfilingEnabled","ExternalServiceLB","MetricsRetentionTime","MetricsScrapeInterval","ProxyMetricsScrapeInterval","RethinkDBCacheSize","CloudProvider","CNIInstallerURL","PodCIDR","CalicoMTU","IPIPMTU","UnmanagedCNI","NodePortRange","AzureIPCount","CustomKubeAPIServerFlags","CustomKubeControllerManagerFlags","CustomKubeletFlags","CustomKubeSchedulerFlags","LocalVolumeCollectionMapping","AuthKeySecretID","managerKubeReservedResources","workerKubeReservedResources"],"properties":{"AuthKeySecretID":{"type":"string"},"AzureIPCount":{"type":"string"},"CNIInstallerURL":{"type":"string"},"CalicoMTU":{"type":"string"},"CloudProvider":{"type":"string"},"ControllerPort":{"type":"integer","format":"int32"},"CustomKubeAPIServerFlags":{"type":"array","items":{"type":"string"}},"CustomKubeControllerManagerFlags":{"type":"array","items":{"type":"string"}},"CustomKubeSchedulerFlags":{"type":"array","items":{"type":"string"}},"CustomKubeletFlags":{"type":"array","items":{"type":"string"}},"DNS":{"type":"array","items":{"type":"string"}},"DNSOpt":{"type":"array","items":{"type":"string"}},"DNSSearch":{"type":"array","items":{"type":"string"}},"ExternalServiceLB":{"type":"string"},"IPIPMTU":{"type":"string"},"KVSnapshotCount":{"type":"integer","format":"int32"},"KVTimeout":{"type":"integer","format":"int32"},"KubeAPIServerPort":{"type":"integer","format":"int32"},"LocalVolumeCollectionMapping":{"type":"boolean"},"MetricsRetentionTime":{"type":"string"},"Me
tricsScrapeInterval":{"type":"string"},"NodePortRange":{"type":"string"},"PodCIDR":{"type":"string"},"ProfilingEnabled":{"type":"boolean"},"ProxyKubeAPIServerPort":{"type":"integer","format":"int32"},"ProxyMetricsScrapeInterval":{"type":"string"},"RethinkDBCacheSize":{"type":"string"},"SwarmPort":{"type":"integer","format":"int32"},"SwarmStrategy":{"type":"string"},"UnmanagedCNI":{"type":"boolean"},"managerKubeReservedResources":{"type":"string"},"workerKubeReservedResources":{"type":"string"}}},"v1.FinalizerName":{"id":"v1.FinalizerName"},"v1.Initializer":{"id":"v1.Initializer","description":"Initializer is information about an initializer that has not yet completed.","required":["name"],"properties":{"name":{"description":"name of the process that is responsible for initializing this object.","type":"string"}}},"v1.Initializers":{"id":"v1.Initializers","description":"Initializers tracks the progress of initialization.","required":["pending"],"properties":{"pending":{"description":"Pending is a list of initializers that must execute in order before this object is visible. When the last pending initializer is removed, and no failing result is set, the initializers struct will be set to nil and the object is considered as initialized and visible to all clients.","type":"array","items":{"$ref":"#/definitions/v1.Initializer"}},"result":{"description":"If result is set with the Failure field, the object will be persisted to storage and then deleted, ensuring that other clients can observe the deletion.","$ref":"#/definitions/v1.Status"}}},"v1.ListMeta":{"id":"v1.ListMeta","description":"ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.","properties":{"continue":{"description":"continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. 
The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response.","type":"string"},"resourceVersion":{"description":"String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency","type":"string"},"selfLink":{"description":"selfLink is a URL representing this object. Populated by the system. Read-only.","type":"string"}}},"v1.Namespace":{"id":"v1.Namespace","description":"Namespace provides a scope for Names. Use of multiple namespaces is optional.","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources","type":"string"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds","type":"string"},"metadata":{"description":"Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata","$ref":"#/definitions/v1.ObjectMeta"},"spec":{"description":"Spec defines the behavior of the Namespace. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status","$ref":"#/definitions/v1.NamespaceSpec"},"status":{"description":"Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status","$ref":"#/definitions/v1.NamespaceStatus"}}},"v1.NamespaceList":{"id":"v1.NamespaceList","description":"NamespaceList is a list of Namespaces.","required":["items"],"properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources","type":"string"},"items":{"description":"Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/","type":"array","items":{"$ref":"#/definitions/v1.Namespace"}},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds","type":"string"},"metadata":{"description":"Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds","$ref":"#/definitions/v1.ListMeta"}}},"v1.NamespaceSpec":{"id":"v1.NamespaceSpec","description":"NamespaceSpec describes the attributes on a Namespace.","properties":{"finalizers":{"description":"Finalizers is an opaque list of values that must be empty to permanently remove object from storage. 
More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/","type":"array","items":{"$ref":"#/definitions/v1.FinalizerName"}}}},"v1.NamespaceStatus":{"id":"v1.NamespaceStatus","description":"NamespaceStatus is information about the current status of a Namespace.","properties":{"phase":{"description":"Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/","type":"string"}}},"v1.ObjectMeta":{"id":"v1.ObjectMeta","description":"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.","properties":{"annotations":{"description":"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations","type":"object"},"clusterName":{"description":"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.","type":"string"},"creationTimestamp":{"description":"CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata","type":"string"},"deletionGracePeriodSeconds":{"description":"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only.","type":"integer","format":"int64"},"deletionTimestamp":{"description":"DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata","type":"string"},"finalizers":{"description":"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.","type":"array","items":{"type":"string"}},"generateName":{"description":"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency","type":"string"},"generation":{"description":"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.","type":"integer","format":"int64"},"initializers":{"description":"An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.","$ref":"#/definitions/v1.Initializers"},"labels":{"description":"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels","type":"object"},"name":{"description":"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names","type":"string"},"namespace":{"description":"Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces","type":"string"},"ownerReferences":{"description":"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.","type":"array","items":{"$ref":"#/definitions/v1.OwnerReference"}},"resourceVersion":{"description":"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency","type":"string"},"selfLink":{"description":"SelfLink is a URL representing this object. Populated by the system. 
Read-only.","type":"string"},"uid":{"description":"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids","type":"string"}}},"v1.OwnerReference":{"id":"v1.OwnerReference","description":"OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.","required":["apiVersion","kind","name","uid"],"properties":{"apiVersion":{"description":"API version of the referent.","type":"string"},"blockOwnerDeletion":{"description":"If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.","type":"boolean"},"controller":{"description":"If true, this reference points to the managing controller.","type":"boolean"},"kind":{"description":"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds","type":"string"},"name":{"description":"Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names","type":"string"},"uid":{"description":"UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids","type":"string"}}},"v1.Status":{"id":"v1.Status","description":"Status is a return value for calls that don't return other objects.","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources","type":"string"},"code":{"description":"Suggested HTTP return code for this status, 0 if not set.","type":"integer","format":"int32"},"details":{"description":"Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.","$ref":"#/definitions/v1.StatusDetails"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds","type":"string"},"message":{"description":"A human-readable description of the status of this operation.","type":"string"},"metadata":{"description":"Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds","$ref":"#/definitions/v1.ListMeta"},"reason":{"description":"A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.","type":"string"},"status":{"description":"Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status","type":"string"}}},"v1.StatusCause":{"id":"v1.StatusCause","description":"StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.","properties":{"field":{"description":"The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. 
Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"","type":"string"},"message":{"description":"A human-readable description of the cause of the error. This field may be presented as-is to a reader.","type":"string"},"reason":{"description":"A machine-readable description of the cause of the error. If this value is empty there is no information available.","type":"string"}}},"v1.StatusDetails":{"id":"v1.StatusDetails","description":"StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.","properties":{"causes":{"description":"The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.","type":"array","items":{"$ref":"#/definitions/v1.StatusCause"}},"group":{"description":"The group attribute of the resource associated with the status StatusReason.","type":"string"},"kind":{"description":"The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds","type":"string"},"name":{"description":"The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).","type":"string"},"retryAfterSeconds":{"description":"If specified, the time in seconds before the operation should be retried. 
Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.","type":"integer","format":"int32"},"uid":{"description":"UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids","type":"string"}}},"||authz.Collection":{"id":"||authz.Collection"},"||authz.RoleCreateResponse":{"id":"||authz.RoleCreateResponse"},"||role.Role":{"id":"||role.Role"}},"securityDefinitions":{"BearerToken":{"type":"apiKey","name":"Authorization","in":"header"}},"security":[{"BearerToken":[]}],"tags":[{"description":"Create and manage containers.\n","name":"Container","x-displayName":"Containers"},{"name":"Image","x-displayName":"Images"},{"description":"Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information.\n","name":"Network","x-displayName":"Networks"},{"description":"Create and manage persistent storage that can be attached to containers.\n","name":"Volume","x-displayName":"Volumes"},{"description":"Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information.\n\nTo exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`.\n","name":"Exec","x-displayName":"Exec"},{"description":"Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information.\n","name":"Swarm","x-displayName":"Swarm"},{"description":"Nodes are instances of the Engine participating in a swarm. 
Swarm mode must be enabled for these endpoints to work.\n","name":"Node","x-displayName":"Nodes"},{"description":"Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work.\n","name":"Service","x-displayName":"Services"},{"description":"A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work.\n","name":"Task","x-displayName":"Tasks"},{"description":"Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work.\n","name":"Secret","x-displayName":"Secrets"},{"description":"Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work.\n","name":"Config","x-displayName":"Configs"},{"name":"Plugin","x-displayName":"Plugins"},{"name":"System","x-displayName":"System"},{"description":"API endpoints which are specific to UCP","name":"UCP"}]} - , + {"swagger":"2.0","info":{"description":"The Universal Control Plane API is a REST API, available using HTTPS, that enables programmatic access to swarm resources that are managed by UCP. UCP exposes the full Docker Engine API, so you can extend your existing code with UCP features. The API is secured with role-based access control so that only authorized users can make changes and deploy applications to your Docker swarm.\n\nThe UCP API is accessible in the same IP addresses and domain names that you use to access the web UI. It's the same API that the UCP web UI uses, so everything you can do on the UCP web UI from your browser, you can also do programmatically.\n\nThe system manages swarm resources by using collections, which you access through the `/collection` endpoint. For example, `GET /defaultCollection/` retrieves the default collection for a user. 
[Learn more about resource collections](https://www.docker.com/ucp-3).\n\n- The `/roles` endpoint lets you enumerate and create custom permissions for accessing collections.\n\n- The `/accounts` endpoint enables managing users, teams, and organizations.\n\n- The `/configs` endpoint gives you access to the swarm's configuration.","title":"UCP API Documentation","version":"1.39"},"paths":{"/_ping":{"get":{"description":"Check the health of a UCP manager.\nUse the `_ping` endpoint to check the health of a single UCP manager node. The UCP manager validates that all of its internal components are working, and it returns either 200, if all components are healthy, or 500, if any aren't healthy.\n\nIf you’re accessing the `_ping` endpoint through a load balancer, you have no way of knowing which UCP manager node isn't healthy, because any manager node may be serving your request. Make sure you’re connecting directly to the URL of a manager node, and not a load balancer.","tags":["UCP"],"summary":"Check the health of a UCP manager.","operationId":"Ping","responses":{"200":{"description":"Success, manager healthy"},"500":{"description":"Failure, manager unhealthy"},"default":{"description":"Success, manager healthy"}}}},"/accounts/":{"get":{"description":"List user and organization accounts.\nLists information about user and organization accounts. Supports sorting and\nfiltering.\nRequires authentication and authorization as any user.","consumes":["application/json"],"produces":["application/json"],"tags":["Accounts"],"summary":"List user and organization accounts.","operationId":"ListAccounts","parameters":[{"type":"string","default":"all","description":"Filter accounts by type or attribute - either \"users\", \"orgs\", \"admins\", \"non-admins\", \"active-users\", \"inactive-users\", or \"all\" (default). 
These filters cannot be combined in any way.","name":"filter","in":"query"},{"type":"string","default":"","description":"Specifies the ordering of the results - either \"name\" (default) or \"fullName\". Prefix with \"+\" (default) or \"-\" to specify ascending or descending order, respectively.","name":"order","in":"query"},{"type":"string","default":"","description":"Only return accounts with an order marker starting from this value.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of accounts per page of results.","name":"limit","in":"query"},{"type":"string","default":"","description":"Additionally filter results to those which have either a name or full name which contains this case insensitive string","name":"contains","in":"query"}],"responses":{"200":{"description":"Success, page of accounts listed.","schema":{"$ref":"#/definitions/responses.Accounts"}},"default":{"description":"Success, page of accounts listed.","schema":{"$ref":"#/definitions/responses.Accounts"}}}},"post":{"description":"Create a user or organization account.\nTo search for and import a user from an LDAP directory, the system must be\nconfigured with LDAP integration.\nRequires authentication and authorization as an admin user.","consumes":["application/json"],"produces":["application/json"],"tags":["Accounts"],"summary":"Create a user or organization account.","operationId":"CreateAccount","parameters":[{"type":"forms.CreateAccount","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.CreateAccount"}}],"responses":{"201":{"description":"Success, account created.","schema":{"$ref":"#/definitions/responses.Account"}}}},"patch":{"description":"Update information about user accounts or organizations, in bulk.\nRequires authentication and authorization as an admin user.","consumes":["application/json"],"produces":["application/json"],"tags":["Accounts"],"summary":"Update information about user accounts or organizations, in 
bulk.","operationId":"BulkAccountOps","parameters":[{"type":"forms.BulkOperations","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.BulkOperations"}}],"responses":{"200":{"description":"Success, bulk operations performed. Any errors encountered for an operation are returned.","schema":{"$ref":"#/definitions/responses.BulkResults"}},"default":{"description":"Success, bulk operations performed. Any errors encountered for an operation are returned.","schema":{"$ref":"#/definitions/responses.BulkResults"}}}}},"/accounts/{accountNameOrID}":{"get":{"description":"Details for a user or organization account.\nRequires authentication and authorization as any user.","consumes":["application/json"],"produces":["application/json"],"tags":["Accounts"],"summary":"Details for a user or organization account.","operationId":"GetAccount","parameters":[{"type":"string","default":"","description":"Name or id of account to fetch","name":"accountNameOrID","in":"path","required":true}],"responses":{"200":{"description":"Success, account returned.","schema":{"$ref":"#/definitions/responses.Account"}},"default":{"description":"Success, account returned.","schema":{"$ref":"#/definitions/responses.Account"}}}},"delete":{"description":"Delete a user or organization account.\nIf the system is configured to import users from an LDAP directory, the user\nmay be created again if they still match the current LDAP search config.\nRequires authentication and authorization as an admin user.","consumes":["application/json"],"produces":["application/json"],"tags":["Accounts"],"summary":"Delete a user or organization account.","operationId":"DeleteAccount","parameters":[{"type":"string","default":"","description":"Name or id of account to delete","name":"accountNameOrID","in":"path","required":true}],"responses":{"204":{"description":"Success, account deleted."}}},"patch":{"description":"Update details for a user or organization account.\nRequires authentication and 
authorization as an admin user, the target user (if\na user), or an admin member of the target organization (if an organization).","consumes":["application/json"],"produces":["application/json"],"tags":["Accounts"],"summary":"Update details for a user or organization account.","operationId":"UpdateAccount","parameters":[{"type":"string","default":"","description":"Name or id of account to update","name":"accountNameOrID","in":"path","required":true},{"type":"forms.UpdateAccount","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.UpdateAccount"}}],"responses":{"200":{"description":"Success, account updated.","schema":{"$ref":"#/definitions/responses.Account"}},"default":{"description":"Success, account updated.","schema":{"$ref":"#/definitions/responses.Account"}}}}},"/accounts/{accountNameOrID}/publicKeys":{"get":{"description":"List accountPublicKeys in an account.\nLists accountPublicKeys in ascending order by key ID.\nRequires authentication and authorization as any user.","consumes":["application/json"],"produces":["application/json"],"tags":["Account Public Keys","Accounts"],"summary":"List accountPublicKeys in an account.","operationId":"ListAccountPublicKeys","parameters":[{"type":"string","default":"","description":"Name or id of the account whose accountPublicKeys will be listed","name":"accountNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Only return accountPublicKeys with a key ID greater than or equal to this name.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of accountPublicKeys per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success, page of accountPublicKeys listed.","schema":{"$ref":"#/definitions/responses.AccountPublicKeys"}},"default":{"description":"Success, page of accountPublicKeys listed.","schema":{"$ref":"#/definitions/responses.AccountPublicKeys"}}}},"post":{"description":"Create a public key 
for an account.\nRequires authentication and authorization as an admin user, the target user (if\na user), or an admin member of the target organization (if an organization).","consumes":["application/json"],"produces":["application/json"],"tags":["Account Public Keys","Accounts"],"summary":"Create a public key for an account.","operationId":"CreateAccountPublicKey","parameters":[{"type":"string","default":"","description":"Name or id of account to fetch","name":"accountNameOrID","in":"path","required":true},{"type":"forms.CreateAccountPublicKey","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.CreateAccountPublicKey"}}],"responses":{"201":{"description":"Success, account public key created.","schema":{"$ref":"#/definitions/responses.AccountPublicKey"}}}}},"/accounts/{accountNameOrID}/publicKeys/{keyID}":{"delete":{"description":"Remove an account public key.\nRequires authentication and authorization as an admin user, the target user (if\na user), or an admin member of the target organization (if an organization).","consumes":["application/json"],"produces":["application/json"],"tags":["Account Public Keys","Accounts"],"summary":"Remove an account public key.","operationId":"DeleteAccountPublicKey","parameters":[{"type":"string","default":"","description":"Name or id of the account","name":"accountNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Public key id of the account","name":"keyID","in":"path","required":true}],"responses":{"204":{"description":"Success, account public key removed."}}},"patch":{"description":"Update details for an account public key.\nRequires authentication and authorization as an admin user, the target user (if\na user), or an admin member of the target organization (if an organization).","consumes":["application/json"],"produces":["application/json"],"tags":["Account Public Keys","Accounts"],"summary":"Update details for an account public 
key.","operationId":"UpdateAccountPublicKey","parameters":[{"type":"string","default":"","description":"Public key id of the account","name":"keyID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of the account","name":"accountNameOrID","in":"path","required":true},{"type":"forms.UpdateAccountPublicKey","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.UpdateAccountPublicKey"}}],"responses":{"200":{"description":"Success, account public key updated.","schema":{"$ref":"#/definitions/responses.AccountPublicKey"}},"default":{"description":"Success, account public key updated.","schema":{"$ref":"#/definitions/responses.AccountPublicKey"}}}}},"/accounts/{orgNameOrID}/adminMemberSyncConfig":{"get":{"description":"Get options for syncing admin members of an organization.\nRequires authentication and authorization as an admin user or an admin member\nof the organization.","consumes":["application/json"],"produces":["application/json"],"tags":["Organization Membership","Organizations","Accounts"],"summary":"Get options for syncing admin members of an organization.","operationId":"GetOrganizationAdminSyncConfig","parameters":[{"type":"string","default":"","description":"Name or id of organization whose LDAP sync options to be retrieved","name":"orgNameOrID","in":"path","required":true}],"responses":{"200":{"description":"Success, LDAP sync options retrieved.","schema":{"$ref":"#/definitions/responses.MemberSyncOpts"}},"default":{"description":"Success, LDAP sync options retrieved.","schema":{"$ref":"#/definitions/responses.MemberSyncOpts"}}}},"put":{"description":"Set options for syncing admin members of an organization.\nEnabling sync of organization admin members will disable the ability to\ndirectly manage organization membership for any users imported from an LDAP\ndirectory. 
Their organization membership is instead set by being synced as an\nadmin member of the organization or by being a member of any team within the\norganization.\nRequires authentication and authorization as an admin user or an admin member\nof the organization.","consumes":["application/json"],"produces":["application/json"],"tags":["Organization Membership","Organizations","Accounts"],"summary":"Set options for syncing admin members of an organization.","operationId":"SetOrganizationAdminSyncConfig","parameters":[{"type":"string","default":"","description":"Name or id of organization whose LDAP sync options to set","name":"orgNameOrID","in":"path","required":true},{"type":"forms.MemberSyncOpts","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.MemberSyncOpts"}}],"responses":{"200":{"description":"Success, LDAP sync options set.","schema":{"$ref":"#/definitions/responses.MemberSyncOpts"}},"default":{"description":"Success, LDAP sync options set.","schema":{"$ref":"#/definitions/responses.MemberSyncOpts"}}}}},"/accounts/{orgNameOrID}/members":{"get":{"description":"List members of an organization.\nLists memberships in ascending order by user ID.\nRequires authentication and authorization as an admin user or a member of the\norganization.","consumes":["application/json"],"produces":["application/json"],"tags":["Organization Membership","Organizations","Accounts"],"summary":"List members of an organization.","operationId":"ListOrganizationMembers","parameters":[{"type":"string","default":"","description":"Name or id of organization whose members will be listed","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"all","description":"Filter members by type - either 'admins', 'non-admins', or 'all' (default).","name":"filter","in":"query"},{"type":"string","default":"","description":"Only return members with a user ID greater than or equal to this 
ID.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of members per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success, page of organization members listed.","schema":{"$ref":"#/definitions/responses.Members"}},"default":{"description":"Success, page of organization members listed.","schema":{"$ref":"#/definitions/responses.Members"}}}}},"/accounts/{orgNameOrID}/members/{memberNameOrID}":{"get":{"description":"Details of a user's membership in an organization.\nRequires authentication and authorization as an admin user, a member of the\norganization, or the target user.","consumes":["application/json"],"produces":["application/json"],"tags":["Organization Membership","Organizations","Accounts"],"summary":"Details of a user's membership in an organization.","operationId":"GetOrganizationMembership","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the membership will be retrieved","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of user whose membership will be retrieved","name":"memberNameOrID","in":"path","required":true}],"responses":{"200":{"description":"Success, membership returned.","schema":{"$ref":"#/definitions/responses.Member"}},"default":{"description":"Success, membership returned.","schema":{"$ref":"#/definitions/responses.Member"}}}},"put":{"description":"Add a user to an organization.\nIf organization admin members are configured to be synced with LDAP, users\nwhich are imported from LDAP cannot be manually added as members of the\norganization and must be either synced as an organization admin member or be\nadded as a member of team within the organization.\nRequires authentication and authorization as an admin user or an admin member\nof the organization","consumes":["application/json"],"produces":["application/json"],"tags":["Organization 
Membership","Organizations","Accounts"],"summary":"Add a user to an organization.","operationId":"AddOrganizationMember","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the membership will be added","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of user which will be added as a member","name":"memberNameOrID","in":"path","required":true},{"type":"forms.SetMembership","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.SetMembership"}}],"responses":{"200":{"description":"Success, membership set.","schema":{"$ref":"#/definitions/responses.Member"}},"default":{"description":"Success, membership set.","schema":{"$ref":"#/definitions/responses.Member"}}}},"delete":{"description":"Remove a user from an organization.\nRemoving a member of the organization will also remove them from any teams in\nthe organization. If organization admin members are configured to be synced\nwith LDAP, users which are imported from LDAP cannot be manually removed as\nmembers of the organization and must be either synced as an organization admin\nmember or removed as a member of all teams within the organization.\nRequires authentication and authorization as an admin user or an admin member\nof the organization.","consumes":["application/json"],"produces":["application/json"],"tags":["Organization Membership","Organizations","Accounts"],"summary":"Remove a user from an organization.","operationId":"DeleteOrganizationMember","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the membership will be deleted","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of user whose membership will be deleted","name":"memberNameOrID","in":"path","required":true}],"responses":{"204":{"description":"Success, membership 
removed."}}}},"/accounts/{orgNameOrID}/members/{memberNameOrID}/teams":{"get":{"description":"List a user's team membership in an organization.\nLists team memberships in ascending order by team ID.\nRequires authentication and authorization as an admin user or a member of the\norganization.","consumes":["application/json"],"produces":["application/json"],"tags":["Organization Membership","Organizations","Accounts"],"summary":"List a user's team membership in an organization.","operationId":"ListOrganizationMemberTeams","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the member's team memberships will be listed","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of user whose memberships will be listed","name":"memberNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Only return team memberships with a team ID greater than or equal to this ID.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of team memberships per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success, page of member's teams listed.","schema":{"$ref":"#/definitions/responses.MemberTeams"}},"default":{"description":"Success, page of member's teams listed.","schema":{"$ref":"#/definitions/responses.MemberTeams"}}}}},"/accounts/{orgNameOrID}/teams":{"get":{"description":"List teams in an organization.\nLists teams in ascending order by name.\nRequires authentication and authorization as an admin user or a member of the\norganization.","consumes":["application/json"],"produces":["application/json"],"tags":["Teams","Organizations","Accounts"],"summary":"List teams in an organization.","operationId":"ListTeams","parameters":[{"type":"string","default":"","description":"Name or id of organization whose teams will be 
listed","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Only return teams with a name greater than or equal to this name.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of teams per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success, page of teams listed.","schema":{"$ref":"#/definitions/responses.Teams"}},"default":{"description":"Success, page of teams listed.","schema":{"$ref":"#/definitions/responses.Teams"}}}},"post":{"description":"Create a team.\nRequires authentication and authorization as an admin user or an admin member\nof the organization.","consumes":["application/json"],"produces":["application/json"],"tags":["Teams","Organizations","Accounts"],"summary":"Create a team.","operationId":"CreateTeam","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the team will be created","name":"orgNameOrID","in":"path","required":true},{"type":"forms.CreateTeam","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.CreateTeam"}}],"responses":{"201":{"description":"Success, team created.","schema":{"$ref":"#/definitions/responses.Team"}}}}},"/accounts/{orgNameOrID}/teams/{teamNameOrID}":{"get":{"description":"Details for a team.\nRequires authentication and authorization as an admin user or a member of the\norganization.","consumes":["application/json"],"produces":["application/json"],"tags":["Teams","Organizations","Accounts"],"summary":"Details for a team.","operationId":"GetTeam","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the team will be retrieved","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of team which will be retrieved","name":"teamNameOrID","in":"path","required":true}],"responses":{"200":{"description":"Success, team 
returned.","schema":{"$ref":"#/definitions/responses.Team"}},"default":{"description":"Success, team returned.","schema":{"$ref":"#/definitions/responses.Team"}}}},"delete":{"description":"Delete a team.\nRequires authentication and authorization as an admin user or an admin member\nof the organization.","consumes":["application/json"],"produces":["application/json"],"tags":["Teams","Organizations","Accounts"],"summary":"Delete a team.","operationId":"DeleteTeam","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the team will be deleted","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of team which will be deleted","name":"teamNameOrID","in":"path","required":true}],"responses":{"204":{"description":"Success, team deleted."}}},"patch":{"description":"Update details for a team.\nRequires authentication and authorization as an admin user, an admin member of\nthe organization, or an admin member of the team.","consumes":["application/json"],"produces":["application/json"],"tags":["Teams","Organizations","Accounts"],"summary":"Update details for a team.","operationId":"UpdateTeam","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the team will be updated","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of team which will be updated","name":"teamNameOrID","in":"path","required":true},{"type":"forms.UpdateTeam","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.UpdateTeam"}}],"responses":{"200":{"description":"Success, team updated.","schema":{"$ref":"#/definitions/responses.Team"}},"default":{"description":"Success, team updated.","schema":{"$ref":"#/definitions/responses.Team"}}}}},"/accounts/{orgNameOrID}/teams/{teamNameOrID}/groupLinkConfig":{"get":{"description":"Get options for linking group of a team.\nRequires authentication and 
authorization as an admin user, an admin group of\nthe organization, or an admin group of the team.","consumes":["application/json"],"produces":["application/json"],"tags":["Team Membership","Teams","Organizations","Accounts"],"summary":"Get options for linking group of a team.","operationId":"GetTeamGroupLinkConfig","parameters":[{"type":"string","default":"","description":"Name or id of organization to which the team belongs","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of team whose SAML link config will be retrieved","name":"teamNameOrID","in":"path","required":true}],"responses":{"200":{"description":"Success, SAML link options retrieved.","schema":{"$ref":"#/definitions/responses.GroupLinkOpts"}},"default":{"description":"Success, SAML link options retrieved.","schema":{"$ref":"#/definitions/responses.GroupLinkOpts"}}}},"put":{"description":"Set options for linking this team with a group attribute from SAML assertions.\nEnabling link of team members will disable the ability to manually manage team\nmembership for any users imported from SAML. 
Their team membership is instead\nmanaged by the group attribute of the SAML assertion.\nRequires authentication and authorization as an admin user, an admin member of\nthe organization, or an admin member of the team.","consumes":["application/json"],"produces":["application/json"],"tags":["Team Membership","Teams","Organizations","Accounts"],"summary":"Set options for linking this team with a group attribute from SAML assertions.","operationId":"SetTeamGroupLinkConfig","parameters":[{"type":"string","default":"","description":"Name or id of organization to which the team belongs","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of team whose SAML link config will be set","name":"teamNameOrID","in":"path","required":true},{"type":"forms.GroupLinkOpts","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.GroupLinkOpts"}}],"responses":{"200":{"description":"Success, SAML link options set.","schema":{"$ref":"#/definitions/responses.GroupLinkOpts"}},"default":{"description":"Success, SAML link options set.","schema":{"$ref":"#/definitions/responses.GroupLinkOpts"}}}}},"/accounts/{orgNameOrID}/teams/{teamNameOrID}/memberSyncConfig":{"get":{"description":"Get options for syncing members of a team.\nRequires authentication and authorization as an admin user, an admin member of\nthe organization, or an admin member of the team.","consumes":["application/json"],"produces":["application/json"],"tags":["Team Membership","Teams","Organizations","Accounts"],"summary":"Get options for syncing members of a team.","operationId":"GetTeamMemberSyncConfig","parameters":[{"type":"string","default":"","description":"Name or id of organization to which the team belongs","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of team whose LDAP sync config will be 
retrieved","name":"teamNameOrID","in":"path","required":true}],"responses":{"200":{"description":"Success, LDAP sync options retrieved.","schema":{"$ref":"#/definitions/responses.MemberSyncOpts"}},"default":{"description":"Success, LDAP sync options retrieved.","schema":{"$ref":"#/definitions/responses.MemberSyncOpts"}}}},"put":{"description":"Set options for syncing members of a team.\nEnabling sync of team members will disable the ability to manually manage team\nmembership for any users imported from LDAP. Their team membership is instead\nmanaged by the LDAP sync.\nRequires authentication and authorization as an admin user, an admin member of\nthe organization, or an admin member of the team.","consumes":["application/json"],"produces":["application/json"],"tags":["Team Membership","Teams","Organizations","Accounts"],"summary":"Set options for syncing members of a team.","operationId":"SetTeamMemberSyncConfig","parameters":[{"type":"string","default":"","description":"Name or id of organization to which the team belongs","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of team whose LDAP sync config will be set","name":"teamNameOrID","in":"path","required":true},{"type":"forms.MemberSyncOpts","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.MemberSyncOpts"}}],"responses":{"200":{"description":"Success, LDAP sync options set.","schema":{"$ref":"#/definitions/responses.MemberSyncOpts"}},"default":{"description":"Success, LDAP sync options set.","schema":{"$ref":"#/definitions/responses.MemberSyncOpts"}}}}},"/accounts/{orgNameOrID}/teams/{teamNameOrID}/members":{"get":{"description":"List members of a team.\nLists memberships in ascending order by user ID.\nRequires authentication and authorization as an admin user or a member of the\norganization.","consumes":["application/json"],"produces":["application/json"],"tags":["Team 
Membership","Teams","Organizations","Accounts"],"summary":"List members of a team.","operationId":"ListTeamMembers","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the team's members will be listed","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of team whose members will be listed","name":"teamNameOrID","in":"path","required":true},{"type":"string","default":"all","description":"Filter members by type - either 'admins', 'non-admins', or 'all' (default).","name":"filter","in":"query"},{"type":"string","default":"","description":"Only return members with a user ID greater than or equal to this ID.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of members per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success, page of team members listed.","schema":{"$ref":"#/definitions/responses.Members"}},"default":{"description":"Success, page of team members listed.","schema":{"$ref":"#/definitions/responses.Members"}}}}},"/accounts/{orgNameOrID}/teams/{teamNameOrID}/members/{memberNameOrID}":{"get":{"description":"Details of a user's membership in a team.\nRequires authentication and authorization as an admin user or a member of the\norganization.","consumes":["application/json"],"produces":["application/json"],"tags":["Team Membership","Teams","Organizations","Accounts"],"summary":"Details of a user's membership in a team.","operationId":"GetTeamMembership","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the team membership will be retrieved","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of the team in which the membership will be retrieved","name":"teamNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of user whose team membership will be 
retrieved","name":"memberNameOrID","in":"path","required":true}],"responses":{"200":{"description":"Success, team membership returned.","schema":{"$ref":"#/definitions/responses.Member"}},"default":{"description":"Success, team membership returned.","schema":{"$ref":"#/definitions/responses.Member"}}}},"put":{"description":"Add a user to a team.\nThe user will be added as a member of the organization if they are not already.\nIf team members are configured to be synced with LDAP, users which are imported\nfrom LDAP cannot be manually added as members of the team and must be synced\nwith LDAP.\nRequires authentication and authorization as an admin user, an admin member of\nthe organization, or an admin member of the team.","consumes":["application/json"],"produces":["application/json"],"tags":["Team Membership","Teams","Organizations","Accounts"],"summary":"Add a user to a team.","operationId":"AddTeamMember","parameters":[{"type":"string","default":"","description":"Name or id of user which will be added as a member","name":"memberNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of organization in which the team membership will be added","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of the team in which the membership will be added","name":"teamNameOrID","in":"path","required":true},{"type":"forms.SetMembership","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.SetMembership"}}],"responses":{"200":{"description":"Success, team membership set.","schema":{"$ref":"#/definitions/responses.Member"}},"default":{"description":"Success, team membership set.","schema":{"$ref":"#/definitions/responses.Member"}}}},"delete":{"description":"Remove a member from a team.\nThe user will remain a member of the organization. 
If team members are\nconfigured to be synced with LDAP, users which are imported from LDAP cannot be\nmanually removed as members of the team and must be synced with LDAP.\nRequires authentication and authorization as an admin user, an admin member of\nthe organization, or an admin member of the team.","consumes":["application/json"],"produces":["application/json"],"tags":["Team Membership","Teams","Organizations","Accounts"],"summary":"Remove a member from a team.","operationId":"DeleteTeamMember","parameters":[{"type":"string","default":"","description":"Name or id of organization in which the team membership will be deleted","name":"orgNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of the team in which the membership will be deleted","name":"teamNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Name or id of user whose team membership will be deleted","name":"memberNameOrID","in":"path","required":true}],"responses":{"204":{"description":"Success, team membership deleted."}}}},"/accounts/{userNameOrID}/changePassword":{"post":{"description":"Change a user's password.\nRequires authentication and authorization as an admin user or the target user.","consumes":["application/json"],"produces":["application/json"],"tags":["User Accounts","Accounts"],"summary":"Change a user's password.","operationId":"ChangePassword","parameters":[{"type":"string","default":"","description":"Username or id of user whose password is to be changed","name":"userNameOrID","in":"path","required":true},{"type":"forms.ChangePassword","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.ChangePassword"}}],"responses":{"200":{"description":"Success, password changed.","schema":{"$ref":"#/definitions/responses.Account"}},"default":{"description":"Success, password 
changed.","schema":{"$ref":"#/definitions/responses.Account"}}}}},"/accounts/{userNameOrID}/organizations":{"get":{"description":"List a user's organization memberships.\nLists organization memberships in ascending order by organization ID.\nRequires authentication and authorization as an admin user or the target user.","consumes":["application/json"],"produces":["application/json"],"tags":["User Accounts","Accounts"],"summary":"List a user's organization memberships.","operationId":"ListUserOrganizations","parameters":[{"type":"string","default":"","description":"Name or id of user to whose organizations will be listed","name":"userNameOrID","in":"path","required":true},{"type":"string","default":"","description":"Only return memberships with an org ID greater than or equal to this ID.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of organizations per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success, page of user's organizations listed.","schema":{"$ref":"#/definitions/responses.MemberOrgs"}},"default":{"description":"Success, page of user's organizations listed.","schema":{"$ref":"#/definitions/responses.MemberOrgs"}}}}},"/api/composehelper":{"get":{"tags":["UCP"],"summary":"/api/composehelper","operationId":"restfulNoop","responses":{}}},"/api/ucp/app/render":{"post":{"tags":["UCP"],"summary":"/api/ucp/app/render","operationId":"restfulNoop","responses":{}}},"/api/ucp/config-toml":{"get":{"description":"Export the current UCP Configuration as a TOML file.","produces":["application/toml"],"tags":["UCP"],"summary":"Export the current UCP Configuration as a TOML file.","operationId":"Get Config TOML","responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/config.UCPConfiguration"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/config.UCPConfiguration"}}}},"put":{"description":"Import UCP Configuration from a TOML 
file.","consumes":["application/toml"],"produces":["application/json"],"tags":["UCP"],"summary":"Import UCP Configuration from a TOML file.","operationId":"PUT Config TOML","responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/api.putConfigOrLicenseResponse"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/api.putConfigOrLicenseResponse"}}}}},"/api/ucp/config/auth/ldap":{"get":{"description":"Retrieve current system LDAP configuration","consumes":["application/json"],"produces":["application/json"],"tags":["Config"],"summary":"Retrieve current system LDAP configuration","operationId":"GetLDAPSettings","responses":{"200":{"description":"Success, current LDAP config returned.","schema":{"$ref":"#/definitions/responses.LDAPSettings"}},"default":{"description":"Success, current LDAP config returned.","schema":{"$ref":"#/definitions/responses.LDAPSettings"}}}},"put":{"description":"Set system LDAP configuration","consumes":["application/json"],"produces":["application/json"],"tags":["Config"],"summary":"Set system LDAP configuration","operationId":"SetLDAPSettings","parameters":[{"type":"forms.LDAPSettings","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/forms.LDAPSettings"}}],"responses":{"200":{"description":"Success, current LDAP config set.","schema":{"$ref":"#/definitions/responses.LDAPSettings"}},"default":{"description":"Success, current LDAP config set.","schema":{"$ref":"#/definitions/responses.LDAPSettings"}}}}},"/auth":{"post":{"description":"Validate credentials for a registry and, if available, get an identity token for accessing the registry without password.","consumes":["application/json"],"produces":["application/json"],"tags":["System"],"summary":"Check auth configuration","operationId":"SystemAuth","parameters":[{"description":"Authentication to check","name":"authConfig","in":"body","schema":{"$ref":"#/definitions/AuthConfig"}}],"responses":{"200":{"description":"An identity token 
was generated successfully.","schema":{"type":"object","title":"SystemAuthResponse","required":["Status"],"properties":{"IdentityToken":{"description":"An opaque token used to authenticate a user after a successful login","type":"string","x-nullable":false},"Status":{"description":"The status of the authentication","type":"string","x-nullable":false}}},"examples":{"application/json":{"IdentityToken":"9cbaf023786cd7...","Status":"Login Succeeded"}}},"204":{"description":"No error"},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/auth/login":{"post":{"description":"Submit a Login Form in exchange for a Session Token.","consumes":["application/json"],"tags":["UCP"],"summary":"Submit a Login Form in exchange for a Session Token.","operationId":"Login","parameters":[{"type":"auth.Credentials","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/auth.Credentials"}}],"responses":{"200":{"description":"Success, login response returned.","schema":{"$ref":"#/definitions/auth.LoginResponse"}},"401":{"description":"Invalid username or password."},"default":{"description":"Success, login response returned.","schema":{"$ref":"#/definitions/auth.LoginResponse"}}}}},"/build":{"post":{"description":"Build an image from a tar archive with a `Dockerfile` in it.\n\nThe `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/).\n\nThe Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. 
After that, each instruction is run one-by-one until the ID of the new image is output.\n\nThe build is canceled if the client drops the connection by quitting or being killed.\n","consumes":["application/octet-stream"],"produces":["application/json"],"tags":["Image"],"summary":"Build an image","operationId":"ImageBuild","parameters":[{"description":"A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz.","name":"inputStream","in":"body","schema":{"type":"string","format":"binary"}},{"type":"string","default":"Dockerfile","description":"Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`.","name":"dockerfile","in":"query"},{"type":"string","description":"A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters.","name":"t","in":"query"},{"type":"string","description":"Extra hosts to add to /etc/hosts","name":"extrahosts","in":"query"},{"type":"string","description":"A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. 
If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball.","name":"remote","in":"query"},{"type":"boolean","default":false,"description":"Suppress verbose build output.","name":"q","in":"query"},{"type":"boolean","default":false,"description":"Do not use the cache when building the image.","name":"nocache","in":"query"},{"type":"string","description":"JSON array of images used for build cache resolution.","name":"cachefrom","in":"query"},{"type":"string","description":"Attempt to pull the image even if an older image exists locally.","name":"pull","in":"query"},{"type":"boolean","default":true,"description":"Remove intermediate containers after a successful build.","name":"rm","in":"query"},{"type":"boolean","default":false,"description":"Always remove intermediate containers, even upon failure.","name":"forcerm","in":"query"},{"type":"integer","description":"Set memory limit for build.","name":"memory","in":"query"},{"type":"integer","description":"Total memory (memory + swap). Set as `-1` to disable swap.","name":"memswap","in":"query"},{"type":"integer","description":"CPU shares (relative weight).","name":"cpushares","in":"query"},{"type":"string","description":"CPUs in which to allow execution (e.g., `0-3`, `0,1`).","name":"cpusetcpus","in":"query"},{"type":"integer","description":"The length of a CPU period in microseconds.","name":"cpuperiod","in":"query"},{"type":"integer","description":"Microseconds of CPU time that the container can get in a CPU period.","name":"cpuquota","in":"query"},{"type":"string","description":"JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. 
This is not meant for passing secret values.\n\nFor example, the build arg `FOO=bar` would become `{\"FOO\":\"bar\"}` in JSON. This would result in the query parameter `buildargs={\"FOO\":\"bar\"}`. Note that `{\"FOO\":\"bar\"}` should be URI component encoded.\n\n[Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)\n","name":"buildargs","in":"query"},{"type":"integer","description":"Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB.","name":"shmsize","in":"query"},{"type":"boolean","description":"Squash the resulting images layers into a single layer. *(Experimental release only.)*","name":"squash","in":"query"},{"type":"string","description":"Arbitrary key/value labels to set on the image, as a JSON map of string pairs.","name":"labels","in":"query"},{"type":"string","description":"Sets the networking mode for the run commands during build. Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken as a custom network's name to which this container should connect to.","name":"networkmode","in":"query"},{"enum":["application/x-tar"],"type":"string","default":"application/x-tar","name":"Content-type","in":"header"},{"type":"string","description":"This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to.\n\nThe key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example:\n\n```\n{\n \"docker.example.com\": {\n \"username\": \"janedoe\",\n \"password\": \"hunter2\"\n },\n \"https://index.docker.io/v1/\": {\n \"username\": \"mobydock\",\n \"password\": \"conta1n3rize14\"\n }\n}\n```\n\nOnly the registry domain name (and port if not the default 443) are required. 
However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API.\n","name":"X-Registry-Config","in":"header"},{"type":"string","default":"","description":"Platform in the format os[/arch[/variant]]","name":"platform","in":"query"},{"type":"string","default":"","description":"Target build stage","name":"target","in":"query"}],"responses":{"200":{"description":"no error"},"400":{"description":"Bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/collectionByPath":{"get":{"description":"Retrieve a single collection by path.","produces":["application/json"],"tags":["UCP"],"summary":"Retrieve a single collection by path.","operationId":"Get Collection by path","parameters":[{"type":"string","default":"","description":"Path of the collection to get.","name":"path","in":"query"}],"responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/authz.Collection"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/authz.Collection"}}}}},"/collectionGrants":{"get":{"description":"Lists all collection grants","produces":["application/json"],"tags":["UCP"],"summary":"Lists all collection grants","operationId":"ListGrants","parameters":[{"type":"string","default":"","description":"Filter grants by subjectID. Only a single value may be specified for this query parameter. A subjectID may be an account ID for a user or organization, or a team ID.","name":"subjectID","in":"query"},{"type":"string","default":"","description":"Filter grants by collection ID. Only a single value may be specified for this query parameter.","name":"objectID","in":"query"},{"type":"string","default":"","description":"Filter grants by roleID. 
Only a single value may be specified for this query parameter.","name":"roleID","in":"query"},{"type":"string","default":"all","description":"Filter grants by a subject type - either \"agent\", \"all\" (default), \"anonymous\", \"authenticated\", \"org\", \"team\", or \"user\" . These filters cannot be combined in any way.","name":"subjectType","in":"query"},{"type":"boolean","default":"false","description":"Expands the subject into a list of subjects that it belongs to.","name":"expandUser","in":"query"},{"type":"boolean","default":"false","description":"Include the collection paths in the response.","name":"showPaths","in":"query"},{"type":"string","default":"","description":"Only return grants with an order marker starting from this value.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of grants per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/responses.Grants"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/responses.Grants"}}}}},"/collectionGrants/{subjectID}/{objectID}/{roleID}":{"put":{"description":"Creates a collection grant","tags":["UCP"],"summary":"Creates a collection grant","operationId":"CreateGrant","parameters":[{"type":"string","default":"","description":"SubjectID of grant to create. 
For a service account, it should follow the format `system:serviceaccount::`","name":"subjectID","in":"path","required":true},{"type":"string","default":"","description":"ObjectID of grant to create","name":"objectID","in":"path","required":true},{"type":"string","default":"","description":"RoleID of grant to create","name":"roleID","in":"path","required":true}],"responses":{"201":{"description":"Success"}}},"delete":{"description":"Deletes a collection grant.","tags":["UCP"],"summary":"Deletes a collection grant.","operationId":"DeleteGrant","parameters":[{"type":"string","default":"","description":"ObjectID of grant to delete","name":"objectID","in":"path","required":true},{"type":"string","default":"","description":"RoleID of grant to delete","name":"roleID","in":"path","required":true},{"type":"string","default":"","description":"SubjectID of grant to delete","name":"subjectID","in":"path","required":true}],"responses":{"204":{"description":"Success"}}}},"/collections":{"get":{"description":"List all visible collections.","produces":["application/json"],"tags":["UCP"],"summary":"List all visible collections.","operationId":"List collections","parameters":[{"type":"string","default":"","description":"Only return collections with an order marker starting from this value.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of collections per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/authz.Collection"}}},"default":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/authz.Collection"}}}}},"post":{"description":"Create a new collection of resources that share mutual authorization settings.","consumes":["application/json"],"produces":["application/json"],"tags":["UCP"],"summary":"Create a new collection of resources that share mutual authorization settings.","operationId":"Create 
Collection","parameters":[{"type":"authz.CollectionCreate","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/authz.CollectionCreate"}}],"responses":{"201":{"description":"Success","schema":{"$ref":"#/definitions/authz.CollectionCreateResponse"}}}}},"/collections/{id}":{"get":{"description":"Retrieve a single collection by ID.","produces":["application/json"],"tags":["UCP"],"summary":"Retrieve a single collection by ID.","operationId":"Get Collection","parameters":[{"type":"string","default":"","description":"ID of the collection to get","name":"id","in":"path","required":true}],"responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/authz.Collection"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/authz.Collection"}}}},"delete":{"description":"Delete a single collection by ID.","tags":["UCP"],"summary":"Delete a single collection by ID.","operationId":"Delete Collection","parameters":[{"type":"string","default":"","description":"ID of the collection to delete.","name":"id","in":"path","required":true}],"responses":{"204":{"description":"Success"}}},"patch":{"description":"Updates an existing collection","consumes":["application/json"],"tags":["UCP"],"summary":"Updates an existing collection","operationId":"Update Collection","parameters":[{"type":"string","default":"","description":"ID of the collection to update.","name":"id","in":"path","required":true},{"type":"authz.CollectionUpdate","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/authz.CollectionUpdate"}}],"responses":{"200":{"description":"Success"},"default":{"description":"Success"}}}},"/collections/{id}/children":{"get":{"description":"Retrieve all children collection to a specific collection.","produces":["application/json"],"tags":["UCP"],"summary":"Retrieve all children collection to a specific collection.","operationId":"Get Collection Children","parameters":[{"type":"string","default":"","description":"ID of the 
collection whose children will be returned","name":"id","in":"path","required":true},{"type":"string","default":"","description":"Only return collections with an order marker starting from this value.","name":"start","in":"query"},{"type":"int","default":"10","description":"Maximum number of collections per page of results.","name":"limit","in":"query"}],"responses":{"200":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/authz.Collection"}}},"default":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/authz.Collection"}}}}}},"/commit":{"post":{"consumes":["application/json"],"produces":["application/json"],"tags":["Image"],"summary":"Create a new image from a container","operationId":"ImageCommit","parameters":[{"description":"The container configuration","name":"containerConfig","in":"body","schema":{"$ref":"#/definitions/ContainerConfig"}},{"type":"string","description":"The ID or name of the container to commit","name":"container","in":"query"},{"type":"string","description":"Repository name for the created image","name":"repo","in":"query"},{"type":"string","description":"Tag name for the create image","name":"tag","in":"query"},{"type":"string","description":"Commit message","name":"comment","in":"query"},{"type":"string","description":"Author of the image (e.g., `John Hannibal Smith `)","name":"author","in":"query"},{"type":"boolean","default":true,"description":"Whether to pause the container before committing","name":"pause","in":"query"},{"type":"string","description":"`Dockerfile` instructions to apply while committing","name":"changes","in":"query"}],"responses":{"201":{"description":"no error","schema":{"$ref":"#/definitions/IdResponse"}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server 
error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/configs":{"get":{"produces":["application/json"],"tags":["Config"],"summary":"List configs","operationId":"ConfigList","parameters":[{"type":"string","description":"A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters:\n\n- `id=`\n- `label= or label==value`\n- `name=`\n- `names=`\n","name":"filters","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"array","items":{"$ref":"#/definitions/Config"},"example":[{"CreatedAt":"2016-11-05T01:20:17.327670065Z","ID":"ktnbjxoalbkvbvedmg1urrz8h","Spec":{"Name":"server.conf"},"UpdatedAt":"2016-11-05T01:20:17.327670065Z","Version":{"Index":11}}]}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/configs/create":{"post":{"description":"If you create a UCP config with a name that starts with `com.docker.ucp.config`, UCP verifies that the config is valid before saving it. 
Also, UCP validates any licenses with names that start with `com.docker.license`.","consumes":["application/json"],"produces":["application/json"],"tags":["Config"],"summary":"Create a config","operationId":"ConfigCreate","parameters":[{"name":"body","in":"body","schema":{"allOf":[{"$ref":"#/definitions/ConfigSpec"},{"type":"object","example":{"Data":"VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==","Labels":{"foo":"bar"},"Name":"server.conf"}}]}}],"responses":{"201":{"description":"no error","schema":{"$ref":"#/definitions/IdResponse"}},"409":{"description":"name conflicts with an existing object","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/configs/{id}":{"get":{"produces":["application/json"],"tags":["Config"],"summary":"Inspect a config","operationId":"ConfigInspect","parameters":[{"type":"string","description":"ID of the config","name":"id","in":"path","required":true}],"responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/Config"},"examples":{"application/json":{"CreatedAt":"2016-11-05T01:20:17.327670065Z","ID":"ktnbjxoalbkvbvedmg1urrz8h","Spec":{"Name":"app-dev.crt"},"UpdatedAt":"2016-11-05T01:20:17.327670065Z","Version":{"Index":11}}}},"404":{"description":"config not found","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}},"delete":{"produces":["application/json"],"tags":["Config"],"summary":"Delete a config","operationId":"ConfigDelete","parameters":[{"type":"string","description":"ID of the config","name":"id","in":"path","required":true}],"responses":{"204":{"description":"no error"},"404":{"description":"config not 
found","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/configs/{id}/update":{"post":{"tags":["Config"],"summary":"Update a Config","operationId":"ConfigUpdate","parameters":[{"type":"string","description":"The ID or name of the config","name":"id","in":"path","required":true},{"description":"The spec of the config to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values.","name":"body","in":"body","schema":{"$ref":"#/definitions/ConfigSpec"}},{"type":"integer","format":"int64","description":"The version number of the config object being updated. This is required to avoid conflicting writes.","name":"version","in":"query","required":true}],"responses":{"200":{"description":"no error"},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such config","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/create":{"post":{"consumes":["application/json","application/octet-stream"],"produces":["application/json"],"tags":["Container"],"summary":"Create a container","operationId":"ContainerCreate","parameters":[{"pattern":"/?[a-zA-Z0-9_-]+","type":"string","description":"Assign the specified name to the container. 
Must match `/?[a-zA-Z0-9_-]+`.","name":"name","in":"query"},{"description":"Container to create","name":"body","in":"body","required":true,"schema":{"allOf":[{"$ref":"#/definitions/ContainerConfig","example":{"AttachStderr":true,"AttachStdin":false,"AttachStdout":true,"Cmd":["date"],"Domainname":"","Entrypoint":"","Env":["FOO=bar","BAZ=quux"],"ExposedPorts":{"22/tcp":{}},"HostConfig":{"AutoRemove":true,"Binds":["/tmp:/tmp"],"BlkioDeviceReadBps":[{}],"BlkioDeviceReadIOps":[{}],"BlkioDeviceWriteBps":[{}],"BlkioDeviceWriteIOps":[{}],"BlkioWeight":300,"BlkioWeightDevice":[{}],"CapAdd":["NET_ADMIN"],"CapDrop":["MKNOD"],"CgroupParent":"","CpuPercent":80,"CpuPeriod":100000,"CpuQuota":50000,"CpuRealtimePeriod":1000000,"CpuRealtimeRuntime":10000,"CpuShares":512,"CpusetCpus":"0,1","CpusetMems":"0,1","Devices":[],"Dns":["8.8.8.8"],"DnsOptions":[""],"DnsSearch":[""],"GroupAdd":["newgroup"],"KernelMemory":0,"Links":["redis3:redis"],"LogConfig":{"Config":{},"Type":"json-file"},"MaximumIOBps":0,"MaximumIOps":0,"Memory":0,"MemoryReservation":0,"MemorySwap":0,"MemorySwappiness":60,"NanoCPUs":500000,"NetworkMode":"bridge","OomKillDisable":false,"OomScoreAdj":500,"PidMode":"","PidsLimit":-1,"PortBindings":{"22/tcp":[{"HostPort":"11022"}]},"Privileged":false,"PublishAllPorts":false,"ReadonlyRootfs":false,"RestartPolicy":{"MaximumRetryCount":0,"Name":""},"SecurityOpt":[],"ShmSize":67108864,"StorageOpt":{},"Ulimits":[{}],"VolumeDriver":"","VolumesFrom":["parent","other:ro"]},"Hostname":"","Image":"ubuntu","Labels":{"com.example.license":"GPL","com.example.vendor":"Acme","com.example.version":"1.0"},"MacAddress":"12:34:56:78:9a:bc","NetworkDisabled":false,"NetworkingConfig":{"EndpointsConfig":{"isolated_nw":{"Aliases":["server_x","server_y"],"IPAMConfig":{"IPv4Address":"172.20.30.33","IPv6Address":"2001:db8:abcd::3033","LinkLocalIPs":["169.254.34.68","fe80::3468"]},"Links":["container_1","container_2"]}}},"OpenStdin":false,"StdinOnce":false,"StopSignal":"SIGTERM","StopTimeout":10,"Tty":fa
lse,"User":"","Volumes":{"/volumes/data":{}},"WorkingDir":""}},{"type":"object","properties":{"HostConfig":{"$ref":"#/definitions/HostConfig"},"NetworkingConfig":{"description":"This container's networking configuration.","type":"object","properties":{"EndpointsConfig":{"description":"A mapping of network name to endpoint configuration for that network.","type":"object","additionalProperties":{"$ref":"#/definitions/EndpointSettings"}}}}},"example":{"AttachStderr":true,"AttachStdin":false,"AttachStdout":true,"Cmd":["date"],"Domainname":"","Entrypoint":"","Env":["FOO=bar","BAZ=quux"],"ExposedPorts":{"22/tcp":{}},"HostConfig":{"AutoRemove":true,"Binds":["/tmp:/tmp"],"BlkioDeviceReadBps":[{}],"BlkioDeviceReadIOps":[{}],"BlkioDeviceWriteBps":[{}],"BlkioDeviceWriteIOps":[{}],"BlkioWeight":300,"BlkioWeightDevice":[{}],"CapAdd":["NET_ADMIN"],"CapDrop":["MKNOD"],"CgroupParent":"","CpuPercent":80,"CpuPeriod":100000,"CpuQuota":50000,"CpuRealtimePeriod":1000000,"CpuRealtimeRuntime":10000,"CpuShares":512,"CpusetCpus":"0,1","CpusetMems":"0,1","Devices":[],"Dns":["8.8.8.8"],"DnsOptions":[""],"DnsSearch":[""],"GroupAdd":["newgroup"],"KernelMemory":0,"Links":["redis3:redis"],"LogConfig":{"Config":{},"Type":"json-file"},"MaximumIOBps":0,"MaximumIOps":0,"Memory":0,"MemoryReservation":0,"MemorySwap":0,"MemorySwappiness":60,"NanoCPUs":500000,"NetworkMode":"bridge","OomKillDisable":false,"OomScoreAdj":500,"PidMode":"","PidsLimit":-1,"PortBindings":{"22/tcp":[{"HostPort":"11022"}]},"Privileged":false,"PublishAllPorts":false,"ReadonlyRootfs":false,"RestartPolicy":{"MaximumRetryCount":0,"Name":""},"SecurityOpt":[],"ShmSize":67108864,"StorageOpt":{},"Ulimits":[{}],"VolumeDriver":"","VolumesFrom":["parent","other:ro"]},"Hostname":"","Image":"ubuntu","Labels":{"com.example.license":"GPL","com.example.vendor":"Acme","com.example.version":"1.0"},"MacAddress":"12:34:56:78:9a:bc","NetworkDisabled":false,"NetworkingConfig":{"EndpointsConfig":{"isolated_nw":{"Aliases":["server_x","server_y"],"IPAMCo
nfig":{"IPv4Address":"172.20.30.33","IPv6Address":"2001:db8:abcd::3033","LinkLocalIPs":["169.254.34.68","fe80::3468"]},"Links":["container_1","container_2"]}}},"OpenStdin":false,"StdinOnce":false,"StopSignal":"SIGTERM","StopTimeout":10,"Tty":false,"User":"","Volumes":{"/volumes/data":{}},"WorkingDir":""}}],"example":{"AttachStderr":true,"AttachStdin":false,"AttachStdout":true,"Cmd":["date"],"Domainname":"","Entrypoint":"","Env":["FOO=bar","BAZ=quux"],"ExposedPorts":{"22/tcp":{}},"HostConfig":{"AutoRemove":true,"Binds":["/tmp:/tmp"],"BlkioDeviceReadBps":[{}],"BlkioDeviceReadIOps":[{}],"BlkioDeviceWriteBps":[{}],"BlkioDeviceWriteIOps":[{}],"BlkioWeight":300,"BlkioWeightDevice":[{}],"CapAdd":["NET_ADMIN"],"CapDrop":["MKNOD"],"CgroupParent":"","CpuPercent":80,"CpuPeriod":100000,"CpuQuota":50000,"CpuRealtimePeriod":1000000,"CpuRealtimeRuntime":10000,"CpuShares":512,"CpusetCpus":"0,1","CpusetMems":"0,1","Devices":[],"Dns":["8.8.8.8"],"DnsOptions":[""],"DnsSearch":[""],"GroupAdd":["newgroup"],"KernelMemory":0,"Links":["redis3:redis"],"LogConfig":{"Config":{},"Type":"json-file"},"MaximumIOBps":0,"MaximumIOps":0,"Memory":0,"MemoryReservation":0,"MemorySwap":0,"MemorySwappiness":60,"NanoCPUs":500000,"NetworkMode":"bridge","OomKillDisable":false,"OomScoreAdj":500,"PidMode":"","PidsLimit":-1,"PortBindings":{"22/tcp":[{"HostPort":"11022"}]},"Privileged":false,"PublishAllPorts":false,"ReadonlyRootfs":false,"RestartPolicy":{"MaximumRetryCount":0,"Name":""},"SecurityOpt":[],"ShmSize":67108864,"StorageOpt":{},"Ulimits":[{}],"VolumeDriver":"","VolumesFrom":["parent","other:ro"]},"Hostname":"","Image":"ubuntu","Labels":{"com.example.license":"GPL","com.example.vendor":"Acme","com.example.version":"1.0"},"MacAddress":"12:34:56:78:9a:bc","NetworkDisabled":false,"NetworkingConfig":{"EndpointsConfig":{"isolated_nw":{"Aliases":["server_x","server_y"],"IPAMConfig":{"IPv4Address":"172.20.30.33","IPv6Address":"2001:db8:abcd::3033","LinkLocalIPs":["169.254.34.68","fe80::3468"]},"Links":["conta
iner_1","container_2"]}}},"OpenStdin":false,"StdinOnce":false,"StopSignal":"SIGTERM","StopTimeout":10,"Tty":false,"User":"","Volumes":{"/volumes/data":{}},"WorkingDir":""}}}],"responses":{"201":{"description":"Container created successfully","schema":{"description":"OK response to ContainerCreate operation","type":"object","title":"ContainerCreateResponse","required":["Id","Warnings"],"properties":{"Id":{"description":"The ID of the created container","type":"string","x-nullable":false},"Warnings":{"description":"Warnings encountered when creating the container","type":"array","items":{"type":"string"},"x-nullable":false}}},"examples":{"application/json":{"Id":"e90e34656806","Warnings":[]}}},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"409":{"description":"conflict","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/json":{"get":{"description":"Returns a list of containers. For details on the format, see [the inspect endpoint](#operation/ContainerInspect).\n\nNote that it uses a different, smaller representation of a container than inspecting a single container. For example,\nthe list of linked containers is not propagated .\n","produces":["application/json"],"tags":["Container"],"summary":"List containers","operationId":"ContainerList","parameters":[{"type":"boolean","default":false,"description":"Return all containers. 
By default, only running containers are shown","name":"all","in":"query"},{"type":"integer","description":"Return this number of most recently created containers, including non-running ones.","name":"limit","in":"query"},{"type":"boolean","default":false,"description":"Return the size of container as fields `SizeRw` and `SizeRootFs`.","name":"size","in":"query"},{"type":"string","description":"Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{\"status\": [\"paused\"]}` will only return paused containers. Available filters:\n\n- `ancestor`=(`[:]`, ``, or ``)\n- `before`=(`` or ``)\n- `expose`=(`[/]`|`/[]`)\n- `exited=` containers with exit code of ``\n- `health`=(`starting`|`healthy`|`unhealthy`|`none`)\n- `id=` a container's ID\n- `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)\n- `is-task=`(`true`|`false`)\n- `label=key` or `label=\"key=value\"` of a container label\n- `name=` a container's name\n- `network`=(`` or ``)\n- `publish`=(`[/]`|`/[]`)\n- `since`=(`` or ``)\n- `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`)\n- `volume`=(`` or ``)\n","name":"filters","in":"query"}],"responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/ContainerSummary"},"examples":{"application/json":[{"Command":"echo 
1","Created":1367854155,"HostConfig":{"NetworkMode":"default"},"Id":"8dfafdbc3a40","Image":"ubuntu:latest","ImageID":"d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82","Labels":{"com.example.license":"GPL","com.example.vendor":"Acme","com.example.version":"1.0"},"Mounts":[{"Destination":"/data","Driver":"local","Mode":"ro,Z","Name":"fac362...80535","Propagation":"","RW":false,"Source":"/data"}],"Names":["/boring_feynman"],"NetworkSettings":{"Networks":{"bridge":{"EndpointID":"2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f","Gateway":"172.17.0.1","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"IPAddress":"172.17.0.2","IPPrefixLen":16,"IPv6Gateway":"","MacAddress":"02:42:ac:11:00:02","NetworkID":"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"}}},"Ports":[{"PrivatePort":2222,"PublicPort":3333,"Type":"tcp"}],"SizeRootFs":0,"SizeRw":12288,"State":"Exited","Status":"Exit 0"},{"Command":"echo 222222","Created":1367854155,"HostConfig":{"NetworkMode":"default"},"Id":"9cd87474be90","Image":"ubuntu:latest","ImageID":"d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82","Labels":{},"Mounts":[],"Names":["/coolName"],"NetworkSettings":{"Networks":{"bridge":{"EndpointID":"88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a","Gateway":"172.17.0.1","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"IPAddress":"172.17.0.8","IPPrefixLen":16,"IPv6Gateway":"","MacAddress":"02:42:ac:11:00:08","NetworkID":"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"}}},"Ports":[],"SizeRootFs":0,"SizeRw":12288,"State":"Exited","Status":"Exit 0"},{"Command":"echo 
3333333333333333","Created":1367854154,"HostConfig":{"NetworkMode":"default"},"Id":"3176a2479c92","Image":"ubuntu:latest","ImageID":"d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82","Labels":{},"Mounts":[],"Names":["/sleepy_dog"],"NetworkSettings":{"Networks":{"bridge":{"EndpointID":"8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d","Gateway":"172.17.0.1","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"IPAddress":"172.17.0.6","IPPrefixLen":16,"IPv6Gateway":"","MacAddress":"02:42:ac:11:00:06","NetworkID":"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"}}},"Ports":[],"SizeRootFs":0,"SizeRw":12288,"State":"Exited","Status":"Exit 0"},{"Command":"echo 444444444444444444444444444444444","Created":1367854152,"HostConfig":{"NetworkMode":"default"},"Id":"4cb07b47f9fb","Image":"ubuntu:latest","ImageID":"d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82","Labels":{},"Mounts":[],"Names":["/running_cat"],"NetworkSettings":{"Networks":{"bridge":{"EndpointID":"d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9","Gateway":"172.17.0.1","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"IPAddress":"172.17.0.5","IPPrefixLen":16,"IPv6Gateway":"","MacAddress":"02:42:ac:11:00:05","NetworkID":"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"}}},"Ports":[],"SizeRootFs":0,"SizeRw":12288,"State":"Exited","Status":"Exit 0"}]}},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/prune":{"post":{"produces":["application/json"],"tags":["Container"],"summary":"Delete stopped containers","operationId":"ContainerPrune","parameters":[{"type":"string","description":"Filters to process on the prune list, encoded as JSON (a `map[string][]string`).\n\nAvailable filters:\n- `until=` Prune containers created before this timestamp. 
The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.\n- `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels.\n","name":"filters","in":"query"}],"responses":{"200":{"description":"No error","schema":{"type":"object","title":"ContainerPruneResponse","properties":{"ContainersDeleted":{"description":"Container IDs that were deleted","type":"array","items":{"type":"string"}},"SpaceReclaimed":{"description":"Disk space reclaimed in bytes","type":"integer","format":"int64"}}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}":{"delete":{"tags":["Container"],"summary":"Remove a container","operationId":"ContainerDelete","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"boolean","default":false,"description":"Remove the volumes associated with the container.","name":"v","in":"query"},{"type":"boolean","default":false,"description":"If the container is running, kill it before removing it.","name":"force","in":"query"},{"type":"boolean","default":false,"description":"Remove the specified link associated with the container.","name":"link","in":"query"}],"responses":{"204":{"description":"no error"},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"409":{"description":"conflict","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"You cannot remove a running container: c2ada9df5af8. 
Stop the container before attempting removal or force remove"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/archive":{"get":{"description":"Get a tar archive of a resource in the filesystem of container id.","produces":["application/x-tar"],"tags":["Container"],"summary":"Get an archive of a filesystem resource in a container","operationId":"ContainerArchive","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"string","description":"Resource in the container’s filesystem to archive.","name":"path","in":"query","required":true}],"responses":{"200":{"description":"no error"},"400":{"description":"Bad parameter","schema":{"allOf":[{"$ref":"#/definitions/ErrorResponse"},{"type":"object","properties":{"message":{"description":"The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file).","type":"string","x-nullable":false}}}]}},"404":{"description":"Container or path does not exist","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}},"put":{"description":"Upload a tar archive to be extracted to a path in the filesystem of container id.","consumes":["application/x-tar","application/octet-stream"],"tags":["Container"],"summary":"Extract an archive of files or folders to a directory in a container","operationId":"PutContainerArchive","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"string","description":"Path to a directory in the container to extract the archive’s contents into. 
","name":"path","in":"query","required":true},{"type":"string","description":"If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa.","name":"noOverwriteDirNonDir","in":"query"},{"description":"The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz.","name":"inputStream","in":"body","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"The content was extracted successfully"},"400":{"description":"Bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"403":{"description":"Permission denied, the volume or container rootfs is marked as read-only.","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"No such container or path does not exist inside the container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}},"head":{"description":"A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path.","tags":["Container"],"summary":"Get information about files in a container","operationId":"ContainerArchiveInfo","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"string","description":"Resource in the container’s filesystem to archive.","name":"path","in":"query","required":true}],"responses":{"200":{"description":"no error","headers":{"X-Docker-Container-Path-Stat":{"type":"string","description":"A base64 - encoded JSON object with some filesystem header information about the path"}}},"400":{"description":"Bad 
parameter","schema":{"allOf":[{"$ref":"#/definitions/ErrorResponse"},{"type":"object","properties":{"message":{"description":"The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file).","type":"string","x-nullable":false}}}]}},"404":{"description":"Container or path does not exist","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/attach":{"post":{"description":"Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached.\n\nEither the `stream` or `logs` parameter must be `true` for this endpoint to do anything.\n\nSee [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details.\n\n### Hijacking\n\nThis endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket.\n\nThis is the response from the daemon for an attach request:\n\n```\nHTTP/1.1 200 OK\nContent-Type: application/vnd.docker.raw-stream\n\n[STREAM]\n```\n\nAfter the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server.\n\nTo hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers.\n\nFor example, the client sends this request to upgrade the connection:\n\n```\nPOST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1\nUpgrade: tcp\nConnection: Upgrade\n```\n\nThe Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream:\n\n```\nHTTP/1.1 101 UPGRADED\nContent-Type: application/vnd.docker.raw-stream\nConnection: 
Upgrade\nUpgrade: tcp\n\n[STREAM]\n```\n\n### Stream format\n\nWhen the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload.\n\nThe header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`).\n\nIt is encoded on the first eight bytes like this:\n\n```go\nheader := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}\n```\n\n`STREAM_TYPE` can be:\n\n- 0: `stdin` (is written on `stdout`)\n- 1: `stdout`\n- 2: `stderr`\n\n`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian.\n\nFollowing the header is the payload, which is the specified number of bytes of `STREAM_TYPE`.\n\nThe simplest way to implement this protocol is the following:\n\n1. Read 8 bytes.\n2. Choose `stdout` or `stderr` depending on the first byte.\n3. Extract the frame size from the last four bytes.\n4. Read the extracted size and output it on the correct output.\n5. Goto 1.\n\n### Stream format when using a TTY\n\nWhen the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. 
The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`.\n","produces":["application/vnd.docker.raw-stream"],"tags":["Container"],"summary":"Attach to a container","operationId":"ContainerAttach","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"string","description":"Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.","name":"detachKeys","in":"query"},{"type":"boolean","default":false,"description":"Replay previous logs from the container.\n\nThis is useful for attaching to a container that has started and you want to output everything since the container started.\n\nIf `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output.\n","name":"logs","in":"query"},{"type":"boolean","default":false,"description":"Stream attached streams from the time the request was made onwards","name":"stream","in":"query"},{"type":"boolean","default":false,"description":"Attach to `stdin`","name":"stdin","in":"query"},{"type":"boolean","default":false,"description":"Attach to `stdout`","name":"stdout","in":"query"},{"type":"boolean","default":false,"description":"Attach to `stderr`","name":"stderr","in":"query"}],"responses":{"101":{"description":"no error, hints proxy about hijacking"},"200":{"description":"no error, no upgrade header found"},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/changes":{"get":{"description":"Returns which files in a container's filesystem have 
been added, deleted,\nor modified. The `Kind` of modification can be one of:\n\n- `0`: Modified\n- `1`: Added\n- `2`: Deleted\n","produces":["application/json"],"tags":["Container"],"summary":"Get changes on a container’s filesystem","operationId":"ContainerChanges","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true}],"responses":{"200":{"description":"The list of changes","schema":{"type":"array","items":{"description":"change item in response to ContainerChanges operation","type":"object","title":"ContainerChangeResponseItem","required":["Path","Kind"],"properties":{"Kind":{"description":"Kind of change","type":"integer","format":"uint8","enum":[0,1,2],"x-nullable":false},"Path":{"description":"Path to file that has changed","type":"string","x-nullable":false}},"x-go-name":"ContainerChangeResponseItem"}},"examples":{"application/json":[{"Kind":0,"Path":"/dev"},{"Kind":1,"Path":"/dev/kmsg"},{"Kind":1,"Path":"/test"}]}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/exec":{"post":{"description":"Run a command inside a running container.","consumes":["application/json"],"produces":["application/json"],"tags":["Exec"],"summary":"Create an exec instance","operationId":"ContainerExec","parameters":[{"description":"Exec configuration","name":"execConfig","in":"body","required":true,"schema":{"type":"object","properties":{"AttachStderr":{"description":"Attach to `stderr` of the exec command.","type":"boolean"},"AttachStdin":{"description":"Attach to `stdin` of the exec command.","type":"boolean"},"AttachStdout":{"description":"Attach to `stdout` of the exec command.","type":"boolean"},"Cmd":{"description":"Command to run, as a string or array of 
strings.","type":"array","items":{"type":"string"}},"DetachKeys":{"description":"Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.","type":"string"},"Env":{"description":"A list of environment variables in the form `[\"VAR=value\", ...]`.","type":"array","items":{"type":"string"}},"Privileged":{"description":"Runs the exec process with extended privileges.","type":"boolean","default":false},"Tty":{"description":"Allocate a pseudo-TTY.","type":"boolean"},"User":{"description":"The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`.","type":"string"},"WorkingDir":{"description":"The working directory for the exec process inside the container.","type":"string"}},"example":{"AttachStderr":true,"AttachStdin":false,"AttachStdout":true,"Cmd":["date"],"DetachKeys":"ctrl-p,ctrl-q","Env":["FOO=bar","BAZ=quux"],"Tty":false}}},{"type":"string","description":"ID or name of container","name":"id","in":"path","required":true}],"responses":{"201":{"description":"no error","schema":{"$ref":"#/definitions/IdResponse"}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"409":{"description":"container is paused","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/export":{"get":{"description":"Export the contents of a container as a tarball.","produces":["application/octet-stream"],"tags":["Container"],"summary":"Export a container","operationId":"ContainerExport","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true}],"responses":{"200":{"description":"no error"},"404":{"description":"no such 
container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/json":{"get":{"description":"Return low-level information about a container.","produces":["application/json"],"tags":["Container"],"summary":"Inspect a container","operationId":"ContainerInspect","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"boolean","default":false,"description":"Return the size of container as fields `SizeRw` and `SizeRootFs`","name":"size","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"object","title":"ContainerInspectResponse","properties":{"AppArmorProfile":{"type":"string"},"Args":{"description":"The arguments to the command being run","type":"array","items":{"type":"string"}},"Config":{"$ref":"#/definitions/ContainerConfig"},"Created":{"description":"The time the container was created","type":"string"},"Driver":{"type":"string"},"ExecIDs":{"description":"IDs of exec instances that are running in the container.","type":"array","items":{"type":"string"},"x-nullable":true},"GraphDriver":{"$ref":"#/definitions/GraphDriverData"},"HostConfig":{"$ref":"#/definitions/HostConfig"},"HostnamePath":{"type":"string"},"HostsPath":{"type":"string"},"Id":{"description":"The ID of the container","type":"string"},"Image":{"description":"The container's image","type":"string"},"LogPath":{"type":"string"},"MountLabel":{"type":"string"},"Mounts":{"type":"array","items":{"$ref":"#/definitions/MountPoint"}},"Name":{"type":"string"},"NetworkSettings":{"$ref":"#/definitions/NetworkSettings"},"Node":{"description":"TODO","type":"object"},"Path":{"description":"The path to the command being 
run","type":"string"},"ProcessLabel":{"type":"string"},"ResolvConfPath":{"type":"string"},"RestartCount":{"type":"integer"},"SizeRootFs":{"description":"The total size of all the files in this container.","type":"integer","format":"int64"},"SizeRw":{"description":"The size of files that have been created or changed by this container.","type":"integer","format":"int64"},"State":{"description":"The state of the container.","type":"object","properties":{"Dead":{"type":"boolean"},"Error":{"type":"string"},"ExitCode":{"description":"The last exit code of this container","type":"integer"},"FinishedAt":{"description":"The time when this container last exited.","type":"string"},"OOMKilled":{"description":"Whether this container has been killed because it ran out of memory.","type":"boolean"},"Paused":{"description":"Whether this container is paused.","type":"boolean"},"Pid":{"description":"The process ID of this container","type":"integer"},"Restarting":{"description":"Whether this container is restarting.","type":"boolean"},"Running":{"description":"Whether this container is running.\n\nNote that a running container can be _paused_. The `Running` and `Paused`\nbooleans are not mutually exclusive:\n\nWhen pausing a container (on Linux), the cgroups freezer is used to suspend\nall processes in the container. Freezing the process requires the process to\nbe running. As a result, paused containers are both `Running` _and_ `Paused`.\n\nUse the `Status` field instead to determine if a container's state is \"running\".\n","type":"boolean"},"StartedAt":{"description":"The time when this container was last started.","type":"string"},"Status":{"description":"The status of the container. 
For example, `\"running\"` or `\"exited\"`.\n","type":"string","enum":["created","running","paused","restarting","removing","exited","dead"]}}}}},"examples":{"application/json":{"AppArmorProfile":"","Args":["-c","exit 9"],"Config":{"AttachStderr":true,"AttachStdin":false,"AttachStdout":true,"Cmd":["/bin/sh","-c","exit 9"],"Domainname":"","Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Hostname":"ba033ac44011","Image":"ubuntu","Labels":{"com.example.license":"GPL","com.example.vendor":"Acme","com.example.version":"1.0"},"MacAddress":"","NetworkDisabled":false,"OpenStdin":false,"StdinOnce":false,"StopSignal":"SIGTERM","StopTimeout":10,"Tty":false,"User":"","Volumes":{"/volumes/data":{}},"WorkingDir":""},"Created":"2015-01-06T15:47:31.485331387Z","Driver":"devicemapper","ExecIDs":["b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca","3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4"],"HostConfig":{"BlkioDeviceReadBps":[{}],"BlkioDeviceReadIOps":[{}],"BlkioDeviceWriteBps":[{}],"BlkioDeviceWriteIOps":[{}],"BlkioWeight":0,"BlkioWeightDevice":[{}],"ContainerIDFile":"","CpuPercent":80,"CpuPeriod":100000,"CpuRealtimePeriod":1000000,"CpuRealtimeRuntime":10000,"CpuShares":0,"CpusetCpus":"","CpusetMems":"","Devices":[],"IpcMode":"","KernelMemory":0,"LogConfig":{"Type":"json-file"},"LxcConf":[],"MaximumIOBps":0,"MaximumIOps":0,"Memory":0,"MemoryReservation":0,"MemorySwap":0,"NetworkMode":"bridge","OomKillDisable":false,"OomScoreAdj":500,"PidMode":"","PortBindings":{},"Privileged":false,"PublishAllPorts":false,"ReadonlyRootfs":false,"RestartPolicy":{"MaximumRetryCount":2,"Name":"on-failure"},"ShmSize":67108864,"Sysctls":{"net.ipv4.ip_forward":"1"},"Ulimits":[{}],"VolumeDriver":""},"HostnamePath":"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname","HostsPath":"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts","Id":"ba033ac44
01106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39","Image":"04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2","LogPath":"/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log","MountLabel":"","Mounts":[{"Destination":"/data","Driver":"local","Mode":"ro,Z","Name":"fac362...80535","Propagation":"","RW":false,"Source":"/data"}],"Name":"/boring_euclid","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","Networks":{"bridge":{"EndpointID":"7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d","Gateway":"172.17.0.1","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"IPAddress":"172.17.0.2","IPPrefixLen":16,"IPv6Gateway":"","MacAddress":"02:42:ac:12:00:02","NetworkID":"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"}},"SandboxID":"","SandboxKey":""},"Path":"/bin/sh","ProcessLabel":"","ResolvConfPath":"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf","RestartCount":1,"State":{"Dead":false,"Error":"","ExitCode":9,"FinishedAt":"2015-01-06T15:47:32.080254511Z","OOMKilled":false,"Paused":false,"Pid":0,"Restarting":false,"Running":true,"StartedAt":"2015-01-06T15:47:32.072697474Z","Status":"running"}}}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/kill":{"post":{"description":"Send a POSIX signal to a container, defaulting to killing to the container.","tags":["Container"],"summary":"Kill a 
container","operationId":"ContainerKill","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"string","default":"SIGKILL","description":"Signal to send to the container as an integer or string (e.g. `SIGINT`)","name":"signal","in":"query"}],"responses":{"204":{"description":"no error"},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"409":{"description":"container is not running","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/logs":{"get":{"description":"Get `stdout` and `stderr` logs from a container.\n\nNote: This endpoint works only for containers with the `json-file` or `journald` logging driver.\n","tags":["Container"],"summary":"Get container logs","operationId":"ContainerLogs","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"boolean","default":false,"description":"Return the logs as a stream.\n\nThis will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. 
For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).\n","name":"follow","in":"query"},{"type":"boolean","default":false,"description":"Return logs from `stdout`","name":"stdout","in":"query"},{"type":"boolean","default":false,"description":"Return logs from `stderr`","name":"stderr","in":"query"},{"type":"integer","default":0,"description":"Only return logs since this time, as a UNIX timestamp","name":"since","in":"query"},{"type":"integer","default":0,"description":"Only return logs before this time, as a UNIX timestamp","name":"until","in":"query"},{"type":"boolean","default":false,"description":"Add timestamps to every log line","name":"timestamps","in":"query"},{"type":"string","default":"all","description":"Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines.","name":"tail","in":"query"}],"responses":{"101":{"description":"logs returned as a stream","schema":{"type":"string","format":"binary"}},"200":{"description":"logs returned as a string in response body","schema":{"type":"string"}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/pause":{"post":{"description":"Use the cgroups freezer to suspend all processes in a container.\n\nTraditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. 
With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed.\n","tags":["Container"],"summary":"Pause a container","operationId":"ContainerPause","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true}],"responses":{"204":{"description":"no error"},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/rename":{"post":{"tags":["Container"],"summary":"Rename a container","operationId":"ContainerRename","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"string","description":"New name for the container","name":"name","in":"query","required":true}],"responses":{"204":{"description":"no error"},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"409":{"description":"name already in use","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/resize":{"post":{"description":"Resize the TTY for a container. 
You must restart the container for the resize to take effect.","consumes":["application/octet-stream"],"produces":["text/plain"],"tags":["Container"],"summary":"Resize a container TTY","operationId":"ContainerResize","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"integer","description":"Height of the tty session in characters","name":"h","in":"query"},{"type":"integer","description":"Width of the tty session in characters","name":"w","in":"query"}],"responses":{"200":{"description":"no error"},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"cannot resize container","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/restart":{"post":{"tags":["Container"],"summary":"Restart a container","operationId":"ContainerRestart","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"integer","description":"Number of seconds to wait before killing the container","name":"t","in":"query"}],"responses":{"204":{"description":"no error"},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/start":{"post":{"tags":["Container"],"summary":"Start a container","operationId":"ContainerStart","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"string","description":"Override the key sequence for detaching a container. 
Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.","name":"detachKeys","in":"query"}],"responses":{"204":{"description":"no error"},"304":{"description":"container already started","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/stats":{"get":{"description":"This endpoint returns a live stream of a container’s resource usage\nstatistics.\n\nThe `precpu_stats` is the CPU statistic of the *previous* read, and is\nused to calculate the CPU usage percentage. It is not an exact copy\nof the `cpu_stats` field.\n\nIf either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is\nnil then for compatibility with older daemons the length of the\ncorresponding `cpu_usage.percpu_usage` array should be used.\n","produces":["application/json"],"tags":["Container"],"summary":"Get container stats based on resource usage","operationId":"ContainerStats","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"boolean","default":true,"description":"Stream the output. 
If false, the stats will be output once and then it will disconnect.","name":"stream","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"object"},"examples":{"application/json":{"blkio_stats":{},"cpu_stats":{"cpu_usage":{"percpu_usage":[8646879,24472255,36438778,30657443],"total_usage":100215355,"usage_in_kernelmode":30000000,"usage_in_usermode":50000000},"online_cpus":4,"system_cpu_usage":739306590000000,"throttling_data":{"periods":0,"throttled_periods":0,"throttled_time":0}},"memory_stats":{"failcnt":0,"limit":67108864,"max_usage":6651904,"stats":{"active_anon":6537216,"active_file":0,"cache":0,"hierarchical_memory_limit":67108864,"inactive_anon":0,"inactive_file":0,"mapped_file":0,"pgfault":964,"pgmajfault":0,"pgpgin":477,"pgpgout":414,"rss":6537216,"rss_huge":6291456,"total_active_anon":6537216,"total_active_file":0,"total_cache":0,"total_inactive_anon":0,"total_inactive_file":0,"total_mapped_file":0,"total_pgfault":964,"total_pgmajfault":0,"total_pgpgin":477,"total_pgpgout":414,"total_rss":6537216,"total_rss_huge":6291456,"total_unevictable":0,"total_writeback":0,"unevictable":0,"writeback":0},"usage":6537216},"networks":{"eth0":{"rx_bytes":5338,"rx_dropped":0,"rx_errors":0,"rx_packets":36,"tx_bytes":648,"tx_dropped":0,"tx_errors":0,"tx_packets":8},"eth5":{"rx_bytes":4641,"rx_dropped":0,"rx_errors":0,"rx_packets":26,"tx_bytes":690,"tx_dropped":0,"tx_errors":0,"tx_packets":9}},"pids_stats":{"current":3},"precpu_stats":{"cpu_usage":{"percpu_usage":[8646879,24350896,36438778,30657443],"total_usage":100093996,"usage_in_kernelmode":30000000,"usage_in_usermode":50000000},"online_cpus":4,"system_cpu_usage":9492140000000,"throttling_data":{"periods":0,"throttled_periods":0,"throttled_time":0}},"read":"2015-01-08T22:57:31.547920715Z"}}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server 
error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/stop":{"post":{"tags":["Container"],"summary":"Stop a container","operationId":"ContainerStop","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"integer","description":"Number of seconds to wait before killing the container","name":"t","in":"query"}],"responses":{"204":{"description":"no error"},"304":{"description":"container already stopped","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/top":{"get":{"description":"On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows.","tags":["Container"],"summary":"List processes running inside a container","operationId":"ContainerTop","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true},{"type":"string","default":"-ef","description":"The arguments to pass to `ps`. 
For example, `aux`","name":"ps_args","in":"query"}],"responses":{"200":{"description":"no error","schema":{"description":"OK response to ContainerTop operation","type":"object","title":"ContainerTopResponse","properties":{"Processes":{"description":"Each process running in the container, where each is process is an array of values corresponding to the titles","type":"array","items":{"type":"array","items":{"type":"string"}}},"Titles":{"description":"The ps column titles","type":"array","items":{"type":"string"}}}},"examples":{"application/json":{"Processes":[["root","13642","882","0","17:03","pts/0","00:00:00","/bin/bash"],["root","13735","13642","0","17:06","pts/0","00:00:00","sleep 10"]],"Titles":["UID","PID","PPID","C","STIME","TTY","TIME","CMD"]}}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/unpause":{"post":{"description":"Resume a container which has been paused.","tags":["Container"],"summary":"Unpause a container","operationId":"ContainerUnpause","parameters":[{"type":"string","description":"ID or name of the container","name":"id","in":"path","required":true}],"responses":{"204":{"description":"no error"},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/containers/{id}/wait":{"post":{"description":"Block until a container stops, then returns the exit code.","produces":["application/json"],"tags":["Container"],"summary":"Wait for a container","operationId":"ContainerWait","parameters":[{"type":"string","description":"ID or name of the 
container","name":"id","in":"path","required":true},{"type":"string","default":"not-running","description":"Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'.","name":"condition","in":"query"}],"responses":{"200":{"description":"The container has exit.","schema":{"description":"OK response to ContainerWait operation","type":"object","title":"ContainerWaitResponse","required":["StatusCode"],"properties":{"Error":{"description":"container waiting error, if any","type":"object","properties":{"Message":{"description":"Details of an error","type":"string"}}},"StatusCode":{"description":"Exit code of the container","type":"integer","x-nullable":false}}}},"404":{"description":"no such container","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such container: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/defaultCollection/{userID}":{"get":{"description":"Retrieve a user's default collection.","produces":["application/json"],"tags":["UCP"],"summary":"Retrieve a user's default collection.","operationId":"Get user default collection","parameters":[{"type":"string","default":"","description":"ID of the user","name":"userID","in":"path","required":true}],"responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/authz.Collection"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/authz.Collection"}}}},"put":{"description":"Set a user's default collection.","consumes":["application/json"],"tags":["UCP"],"summary":"Set a user's default collection.","operationId":"Set user default collection","parameters":[{"type":"string","default":"","description":"ID of the 
user","name":"userID","in":"path","required":true},{"type":"authz.CollectionID","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/authz.CollectionID"}}],"responses":{"201":{"description":"Success"}}},"delete":{"description":"Delete the default collection setting for a user","tags":["UCP"],"summary":"Delete the default collection setting for a user","operationId":"DeleteUserDefaultCollection","parameters":[{"type":"string","default":"","description":"ID of the user whose default collection mapping will be deleted","name":"userID","in":"path","required":true}],"responses":{"204":{"description":"Success"}}}},"/defaultCollectionRole":{"get":{"description":"Retrieve the role for the logged-in user's default collection.","produces":["application/json"],"tags":["UCP"],"summary":"Retrieve the role for the logged-in user's default collection.","operationId":"Get the logged-in user's role for their default collection","responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/role.Role"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/role.Role"}}}}},"/distribution/{name}/json":{"get":{"description":"Return image digest and platform information by contacting the registry.","produces":["application/json"],"tags":["Distribution"],"summary":"Get image information from the registry","operationId":"DistributionInspect","parameters":[{"type":"string","description":"Image name or id","name":"name","in":"path","required":true}],"responses":{"200":{"description":"descriptor and platform information","schema":{"type":"object","title":"DistributionInspectResponse","required":["Descriptor","Platforms"],"properties":{"Descriptor":{"description":"A descriptor struct containing digest, media type, and size","type":"object","properties":{"Digest":{"type":"string"},"MediaType":{"type":"string"},"Size":{"type":"integer","format":"int64"},"URLs":{"type":"array","items":{"type":"string"}}}},"Platforms":{"description":"An array 
containing all platforms supported by the image","type":"array","items":{"type":"object","properties":{"Architecture":{"type":"string"},"Features":{"type":"array","items":{"type":"string"}},"OS":{"type":"string"},"OSFeatures":{"type":"array","items":{"type":"string"}},"OSVersion":{"type":"string"},"Variant":{"type":"string"}}}}},"x-go-name":"DistributionInspect"},"examples":{"application/json":{"Descriptor":{"Digest":"sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96","MediaType":"application/vnd.docker.distribution.manifest.v2+json","Size":3987495,"URLs":[""]},"Platforms":[{"Architecture":"amd64","Features":[""],"OS":"linux","OSFeatures":[""],"OSVersion":"","Variant":""}]}}},"401":{"description":"Failed authentication or no image found","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such image: someimage (tag: latest)"}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/events":{"get":{"description":"Stream real-time events from the server.\n\nVarious objects within Docker report events when something happens to them.\n\nContainers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, and `update`\n\nImages report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, and `untag`\n\nVolumes report these events: `create`, `mount`, `unmount`, and `destroy`\n\nNetworks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, and `remove`\n\nThe Docker daemon reports these events: `reload`\n\nServices report these events: `create`, `update`, and `remove`\n\nNodes report these events: `create`, `update`, and `remove`\n\nSecrets report these events: `create`, `update`, and `remove`\n\nConfigs report these events: `create`, 
`update`, and `remove`\n","produces":["application/json"],"tags":["System"],"summary":"Monitor events","operationId":"SystemEvents","parameters":[{"type":"string","description":"Show events created since this timestamp then stream new events.","name":"since","in":"query"},{"type":"string","description":"Show events created until this timestamp then stop streaming.","name":"until","in":"query"},{"type":"string","description":"A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters:\n\n- `config=` config name or ID\n- `container=` container name or ID\n- `daemon=` daemon name or ID\n- `event=` event type\n- `image=` image name or ID\n- `label=` image or container label\n- `network=` network name or ID\n- `node=` node ID\n- `plugin`= plugin name or ID\n- `scope`= local or swarm\n- `secret=` secret name or ID\n- `service=` service name or ID\n- `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config`\n- `volume=` volume name\n","name":"filters","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"object","title":"SystemEventsResponse","properties":{"Action":{"description":"The type of event","type":"string"},"Actor":{"type":"object","properties":{"Attributes":{"description":"Various key/value attributes of the object, depending on its type","type":"object","additionalProperties":{"type":"string"}},"ID":{"description":"The ID of the object emitting the event","type":"string"}}},"Type":{"description":"The type of object emitting the event","type":"string"},"time":{"description":"Timestamp of event","type":"integer"},"timeNano":{"description":"Timestamp of event, with nanosecond 
accuracy","type":"integer","format":"int64"}}},"examples":{"application/json":{"Action":"create","Actor":{"Attributes":{"com.example.some-label":"some-label-value","image":"alpine","name":"my-container"},"ID":"ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"},"Type":"container","time":1461943101}}},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/exec/{id}/json":{"get":{"description":"Return low-level information about an exec instance.","produces":["application/json"],"tags":["Exec"],"summary":"Inspect an exec instance","operationId":"ExecInspect","parameters":[{"type":"string","description":"Exec instance ID","name":"id","in":"path","required":true}],"responses":{"200":{"description":"No error","schema":{"type":"object","title":"ExecInspectResponse","properties":{"CanRemove":{"type":"boolean"},"ContainerID":{"type":"string"},"DetachKeys":{"type":"string"},"ExitCode":{"type":"integer"},"ID":{"type":"string"},"OpenStderr":{"type":"boolean"},"OpenStdin":{"type":"boolean"},"OpenStdout":{"type":"boolean"},"Pid":{"description":"The system process ID for the exec process.","type":"integer"},"ProcessConfig":{"$ref":"#/definitions/ProcessConfig"},"Running":{"type":"boolean"}}},"examples":{"application/json":{"CanRemove":false,"ContainerID":"b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126","DetachKeys":"","ExitCode":2,"ID":"f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b","OpenStderr":true,"OpenStdin":true,"OpenStdout":true,"Pid":42000,"ProcessConfig":{"arguments":["-c","exit 2"],"entrypoint":"sh","privileged":false,"tty":true,"user":"1000"},"Running":false}}},"404":{"description":"No such exec instance","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/exec/{id}/resize":{"post":{"description":"Resize the TTY 
session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance.","tags":["Exec"],"summary":"Resize an exec instance","operationId":"ExecResize","parameters":[{"type":"string","description":"Exec instance ID","name":"id","in":"path","required":true},{"type":"integer","description":"Height of the TTY session in characters","name":"h","in":"query"},{"type":"integer","description":"Width of the TTY session in characters","name":"w","in":"query"}],"responses":{"201":{"description":"No error"},"404":{"description":"No such exec instance","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/exec/{id}/start":{"post":{"description":"Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command.","consumes":["application/json"],"produces":["application/vnd.docker.raw-stream"],"tags":["Exec"],"summary":"Start an exec instance","operationId":"ExecStart","parameters":[{"name":"execStartConfig","in":"body","schema":{"type":"object","properties":{"Detach":{"description":"Detach from the command.","type":"boolean"},"Tty":{"description":"Allocate a pseudo-TTY.","type":"boolean"}},"example":{"Detach":false,"Tty":false}}},{"type":"string","description":"Exec instance ID","name":"id","in":"path","required":true}],"responses":{"200":{"description":"No error"},"404":{"description":"No such exec instance","schema":{"$ref":"#/definitions/ErrorResponse"}},"409":{"description":"Container is stopped or paused","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/id/":{"get":{"description":"Identify the Currently Authenticated Account.","produces":["application/json"],"tags":["Identity"],"summary":"Identify the Currently Authenticated Account.","operationId":"ID","responses":{"200":{"description":"Success, current account returned."},"default":{"description":"Success, current account 
returned."}}}},"/id/logout":{"post":{"description":"Delete the current session in use.","produces":["application/json"],"tags":["Identity"],"summary":"Delete the current session in use.","operationId":"Logout","responses":{"204":{"description":"Success, current session deleted."}}}},"/images/create":{"post":{"description":"Create an image by either pulling it from a registry or importing it.\n\nThe `create` request pulls the image onto every node in the swarm that exists at that time and that has the right operating system. Nodes that join the swarm later or that don't have the same base OS as the image won't get the image.","consumes":["text/plain","application/octet-stream"],"produces":["application/json"],"tags":["Image"],"summary":"Create an image","operationId":"ImageCreate","parameters":[{"type":"string","description":"Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed.","name":"fromImage","in":"query"},{"type":"string","description":"Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image.","name":"fromSrc","in":"query"},{"type":"string","description":"Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image.","name":"repo","in":"query"},{"type":"string","description":"Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled.","name":"tag","in":"query"},{"description":"Image content if the value `-` has been specified in fromSrc query parameter","name":"inputImage","in":"body","schema":{"type":"string"}},{"type":"string","description":"A base64-encoded auth configuration. 
[See the authentication section for details.](#section/Authentication)","name":"X-Registry-Auth","in":"header"},{"type":"string","default":"","description":"Platform in the format os[/arch[/variant]]","name":"platform","in":"query"}],"responses":{"200":{"description":"no error"},"404":{"description":"repository does not exist or no read access","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/get":{"get":{"description":"Get a tarball containing all images and metadata for several image repositories.\n\nFor each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID.\n\nFor details on the format, see [the export image endpoint](#operation/ImageGet).\n","produces":["application/x-tar"],"tags":["Image"],"summary":"Export several images","operationId":"ImageGetAll","parameters":[{"type":"array","items":{"type":"string"},"description":"Image names to filter by","name":"names","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"string","format":"binary"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/json":{"get":{"description":"Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image.\n\nThis endpoint returns the union of all images on all nodes in the cluster.","produces":["application/json"],"tags":["Image"],"summary":"List Images","operationId":"ImageList","parameters":[{"type":"boolean","default":false,"description":"Show all images. 
Only images from a final layer (no children) are shown by default.","name":"all","in":"query"},{"type":"string","description":"A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:\n\n- `before`=(`[:]`, `` or ``)\n- `dangling=true`\n- `label=key` or `label=\"key=value\"` of an image label\n- `reference`=(`[:]`)\n- `since`=(`[:]`, `` or ``)\n","name":"filters","in":"query"},{"type":"boolean","default":false,"description":"Show digest information as a `RepoDigests` field on each image.","name":"digests","in":"query"}],"responses":{"200":{"description":"Summary image data for the images matching the query","schema":{"type":"array","items":{"$ref":"#/definitions/ImageSummary"}},"examples":{"application/json":[{"Containers":2,"Created":1474925151,"Id":"sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8","Labels":{},"ParentId":"","RepoDigests":["ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787"],"RepoTags":["ubuntu:12.04","ubuntu:precise"],"SharedSize":0,"Size":103579269,"VirtualSize":103579269},{"Containers":5,"Created":1403128455,"Id":"sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175","Labels":{},"ParentId":"","RepoDigests":["ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7","ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3"],"RepoTags":["ubuntu:12.10","ubuntu:quantal"],"SharedSize":0,"Size":172064416,"VirtualSize":172064416}]}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/load":{"post":{"description":"Load a set of images and tags into a repository.\n\nFor details on the format, see [the export image endpoint](#operation/ImageGet).\n\n\nThe image is loaded on all nodes in the swarm that are compatible with the image's architecture. 
For example, Windows images aren't loaded on Linux nodes, and vice-versa.","consumes":["application/x-tar"],"produces":["application/json"],"tags":["Image"],"summary":"Import images","operationId":"ImageLoad","parameters":[{"description":"Tar archive containing images","name":"imagesTarball","in":"body","schema":{"type":"string","format":"binary"}},{"type":"boolean","default":false,"description":"Suppress progress details during load.","name":"quiet","in":"query"}],"responses":{"200":{"description":"no error"},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/prune":{"post":{"produces":["application/json"],"tags":["Image"],"summary":"Delete unused images","operationId":"ImagePrune","parameters":[{"type":"string","description":"Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters:\n\n- `dangling=` When set to `true` (or `1`), prune only\n unused *and* untagged images. When set to `false`\n (or `0`), all unused images are pruned.\n- `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. 
`10m`, `1h30m`) computed relative to the daemon machine’s time.\n- `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels.\n","name":"filters","in":"query"}],"responses":{"200":{"description":"No error","schema":{"type":"object","title":"ImagePruneResponse","properties":{"ImagesDeleted":{"description":"Images that were deleted","type":"array","items":{"$ref":"#/definitions/ImageDeleteResponseItem"}},"SpaceReclaimed":{"description":"Disk space reclaimed in bytes","type":"integer","format":"int64"}}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/search":{"get":{"description":"Search for an image on Docker Hub.","produces":["application/json"],"tags":["Image"],"summary":"Search images","operationId":"ImageSearch","parameters":[{"type":"string","description":"Term to search","name":"term","in":"query","required":true},{"type":"integer","description":"Maximum number of results to return","name":"limit","in":"query"},{"type":"string","description":"A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. 
Available filters:\n\n- `is-automated=(true|false)`\n- `is-official=(true|false)`\n- `stars=` Matches images that have at least 'number' stars.\n","name":"filters","in":"query"}],"responses":{"200":{"description":"No error","schema":{"type":"array","items":{"type":"object","title":"ImageSearchResponseItem","properties":{"description":{"type":"string"},"is_automated":{"type":"boolean"},"is_official":{"type":"boolean"},"name":{"type":"string"},"star_count":{"type":"integer"}}}},"examples":{"application/json":[{"description":"","is_automated":false,"is_official":false,"name":"wma55/u1210sshd","star_count":0},{"description":"","is_automated":false,"is_official":false,"name":"jdswinbank/sshd","star_count":0},{"description":"","is_automated":false,"is_official":false,"name":"vgauthier/sshd","star_count":0}]}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/{name}":{"delete":{"description":"Remove an image, along with any untagged parent images that were\nreferenced by that image.\n\nImages can't be removed if they have descendant images, are being\nused by a running container or are being used by a build.\n","produces":["application/json"],"tags":["Image"],"summary":"Remove an image","operationId":"ImageDelete","parameters":[{"type":"string","description":"Image name or ID","name":"name","in":"path","required":true},{"type":"boolean","default":false,"description":"Remove the image even if it is being used by stopped containers or has other tags","name":"force","in":"query"},{"type":"boolean","default":false,"description":"Do not delete untagged parent images","name":"noprune","in":"query"}],"responses":{"200":{"description":"The image was deleted successfully","schema":{"type":"array","items":{"$ref":"#/definitions/ImageDeleteResponseItem"}},"examples":{"application/json":[{"Untagged":"3e2f21a89f"},{"Deleted":"3e2f21a89f"},{"Deleted":"53b4f83ac9"}]}},"404":{"description":"No such 
image","schema":{"$ref":"#/definitions/ErrorResponse"}},"409":{"description":"Conflict","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/{name}/get":{"get":{"description":"Get a tarball containing all images and metadata for a repository.\n\nIf `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced.\n\n### Image tarball format\n\nAn image tarball contains one directory per image layer (named using its long ID), each containing these files:\n\n- `VERSION`: currently `1.0` - the file format version\n- `json`: detailed layer information, similar to `docker inspect layer_id`\n- `layer.tar`: A tarfile containing the filesystem changes in this layer\n\nThe `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions.\n\nIf the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs.\n\n```json\n{\n \"hello-world\": {\n \"latest\": \"565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1\"\n }\n}\n```\n","produces":["application/x-tar"],"tags":["Image"],"summary":"Export an image","operationId":"ImageGet","parameters":[{"type":"string","description":"Image name or ID","name":"name","in":"path","required":true}],"responses":{"200":{"description":"no error","schema":{"type":"string","format":"binary"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/{name}/history":{"get":{"description":"Return parent layers of an image.","produces":["application/json"],"tags":["Image"],"summary":"Get the history of an 
image","operationId":"ImageHistory","parameters":[{"type":"string","description":"Image name or ID","name":"name","in":"path","required":true}],"responses":{"200":{"description":"List of image layers","schema":{"type":"array","items":{"description":"individual image layer information in response to ImageHistory operation","type":"object","title":"HistoryResponseItem","required":["Id","Created","CreatedBy","Tags","Size","Comment"],"properties":{"Comment":{"type":"string","x-nullable":false},"Created":{"type":"integer","format":"int64","x-nullable":false},"CreatedBy":{"type":"string","x-nullable":false},"Id":{"type":"string","x-nullable":false},"Size":{"type":"integer","format":"int64","x-nullable":false},"Tags":{"type":"array","items":{"type":"string"}}},"x-go-name":"HistoryResponseItem"}},"examples":{"application/json":[{"Comment":"","Created":1398108230,"CreatedBy":"/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /","Id":"3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710","Size":182964289,"Tags":["ubuntu:lucid","ubuntu:10.04"]},{"Comment":"","Created":1398108222,"CreatedBy":"/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/","Id":"6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8","Size":0,"Tags":[]},{"Comment":"Imported from -","Created":1371157430,"CreatedBy":"","Id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","Size":0,"Tags":["scratch12:latest","scratch:latest"]}]}},"404":{"description":"No such image","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/{name}/json":{"get":{"description":"Return low-level information about an image.","produces":["application/json"],"tags":["Image"],"summary":"Inspect an 
image","operationId":"ImageInspect","parameters":[{"type":"string","description":"Image name or id","name":"name","in":"path","required":true}],"responses":{"200":{"description":"No error","schema":{"$ref":"#/definitions/Image"},"examples":{"application/json":{"Architecture":"amd64","Author":"","Comment":"","Config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/bash"],"Domainname":"","Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Hostname":"e611e15f9c9d","Image":"91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c","Labels":{"com.example.license":"GPL","com.example.vendor":"Acme","com.example.version":"1.0"},"MacAddress":"","NetworkDisabled":false,"OnBuild":[],"OpenStdin":false,"PublishService":"","StdinOnce":false,"Tty":false,"User":"","WorkingDir":""},"Container":"cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a","ContainerConfig":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) LABEL com.example.vendor=Acme com.example.license=GPL 
com.example.version=1.0"],"Domainname":"","Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Hostname":"e611e15f9c9d","Image":"91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c","Labels":{"com.example.license":"GPL","com.example.vendor":"Acme","com.example.version":"1.0"},"MacAddress":"","NetworkDisabled":false,"OnBuild":[],"OpenStdin":false,"PublishService":"","StdinOnce":false,"Tty":false,"User":"","WorkingDir":""},"Created":"2015-09-10T08:30:53.26995814Z","DockerVersion":"1.9.0-dev","GraphDriver":{"Data":{},"Name":"aufs"},"Id":"sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c","Os":"linux","Parent":"sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c","RepoDigests":["localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"],"RepoTags":["example:1.0","example:latest","example:stable"],"RootFS":{"Layers":["sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6","sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"],"Type":"layers"},"Size":0,"VirtualSize":188359297}}},"404":{"description":"No such image","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such image: someimage (tag: latest)"}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/{name}/push":{"post":{"description":"Push an image to a registry.\n\nIf you wish to push an image on to a private registry, that image must already have a tag which references the registry. 
For example, `registry.example.com/myimage:latest`.\n\nThe push is cancelled if the HTTP connection is closed.\n","consumes":["application/octet-stream"],"tags":["Image"],"summary":"Push an image","operationId":"ImagePush","parameters":[{"type":"string","description":"Image name or ID.","name":"name","in":"path","required":true},{"type":"string","description":"The tag to associate with the image on the registry.","name":"tag","in":"query"},{"type":"string","description":"A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)","name":"X-Registry-Auth","in":"header","required":true}],"responses":{"200":{"description":"No error"},"404":{"description":"No such image","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/images/{name}/tag":{"post":{"description":"Tag an image so that it becomes part of a repository.","tags":["Image"],"summary":"Tag an image","operationId":"ImageTag","parameters":[{"type":"string","description":"Image name or ID to tag.","name":"name","in":"path","required":true},{"type":"string","description":"The repository to tag in. 
For example, `someuser/someimage`.","name":"repo","in":"query"},{"type":"string","description":"The name of the new tag.","name":"tag","in":"query"}],"responses":{"201":{"description":"No error"},"400":{"description":"Bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"No such image","schema":{"$ref":"#/definitions/ErrorResponse"}},"409":{"description":"Conflict","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/info":{"get":{"description":"UCP returns a combination of info about the swarm, including:\n\n- Swarm manager status\n\n- Swarm scheduler strategy\n\n- Swarm-manager endpoints that are useful for administrator access when troubleshooting\n\n- Engine proxy endpoints for each node in the swarm that are useful for troubleshooting\n\n- Plugins present on the current manager node\n\n- Engine information for the current manager node","produces":["application/json"],"tags":["System"],"summary":"Get system information","operationId":"SystemInfo","responses":{"200":{"description":"No error","schema":{"$ref":"#/definitions/SystemInfo"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/kubernetesNamespaces":{"get":{"description":"Lists all namespaces for which a user has a grant","produces":["application/json"],"tags":["UCP"],"summary":"Lists all namespaces for which a user has a grant","operationId":"ListUserNamespaces","responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/v1.NamespaceList"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/v1.NamespaceList"}}}}},"/metricsdiscovery":{"get":{"tags":["UCP"],"summary":"/metricsdiscovery","operationId":"restfulNoop","responses":{}}},"/networks":{"get":{"description":"Returns a list of networks. 
For details on the format, see [the network inspect endpoint](#operation/NetworkInspect).\n\nNote that it uses a different, smaller representation of a network than inspecting a single network. For example,\nthe list of containers attached to the network is not propagated in API versions 1.28 and up.\n\n\nNode-specific networks, like those with bridge and third-party drivers, are prefixed with the node name.","produces":["application/json"],"tags":["Network"],"summary":"List networks","operationId":"NetworkList","parameters":[{"type":"string","description":"JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters:\n\n- `driver=` Matches a network's driver.\n- `id=` Matches all or part of a network ID.\n- `label=` or `label==` of a network label.\n- `name=` Matches all or part of a network name.\n- `scope=[\"swarm\"|\"global\"|\"local\"]` Filters networks by scope (`swarm`, `global`, or `local`).\n- `type=[\"custom\"|\"builtin\"]` Filters networks by type. 
The `custom` keyword returns all user-defined networks.\n","name":"filters","in":"query"}],"responses":{"200":{"description":"No error","schema":{"type":"array","items":{"$ref":"#/definitions/Network"}},"examples":{"application/json":[{"Attachable":false,"Created":"2016-10-19T06:21:00.416543526Z","Driver":"bridge","EnableIPv6":false,"IPAM":{"Config":[{"Subnet":"172.17.0.0/16"}],"Driver":"default"},"Id":"f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566","Ingress":false,"Internal":false,"Name":"bridge","Options":{"com.docker.network.bridge.default_bridge":"true","com.docker.network.bridge.enable_icc":"true","com.docker.network.bridge.enable_ip_masquerade":"true","com.docker.network.bridge.host_binding_ipv4":"0.0.0.0","com.docker.network.bridge.name":"docker0","com.docker.network.driver.mtu":"1500"},"Scope":"local"},{"Attachable":false,"Containers":{},"Created":"0001-01-01T00:00:00Z","Driver":"null","EnableIPv6":false,"IPAM":{"Config":[],"Driver":"default"},"Id":"e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794","Ingress":false,"Internal":false,"Name":"none","Options":{},"Scope":"local"},{"Attachable":false,"Containers":{},"Created":"0001-01-01T00:00:00Z","Driver":"host","EnableIPv6":false,"IPAM":{"Config":[],"Driver":"default"},"Id":"13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e","Ingress":false,"Internal":false,"Name":"host","Options":{},"Scope":"local"}]}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/networks/create":{"post":{"description":"If the name is prefixed with the name of a node, the `create` request is sent to the specified node. 
Use the '/' character to separate the node name, like `testnode/testnet`.","consumes":["application/json"],"produces":["application/json"],"tags":["Network"],"summary":"Create a network","operationId":"NetworkCreate","parameters":[{"description":"Network configuration","name":"networkConfig","in":"body","required":true,"schema":{"type":"object","required":["Name"],"properties":{"Attachable":{"description":"Globally scoped network is manually attachable by regular containers from workers in swarm mode.","type":"boolean"},"CheckDuplicate":{"description":"Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions.","type":"boolean"},"Driver":{"description":"Name of the network driver plugin to use.","type":"string","default":"bridge"},"EnableIPv6":{"description":"Enable IPv6 on the network.","type":"boolean"},"IPAM":{"description":"Optional custom IP scheme for the network.","$ref":"#/definitions/IPAM"},"Ingress":{"description":"Ingress network is the network which provides the routing-mesh in swarm mode.","type":"boolean"},"Internal":{"description":"Restrict external access to the network.","type":"boolean"},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"Name":{"description":"The network's name.","type":"string"},"Options":{"description":"Network specific options to be used by the 
drivers.","type":"object","additionalProperties":{"type":"string"}}},"example":{"Attachable":false,"CheckDuplicate":false,"Driver":"bridge","EnableIPv6":true,"IPAM":{"Config":[{"Gateway":"172.20.10.11","IPRange":"172.20.10.0/24","Subnet":"172.20.0.0/16"},{"Gateway":"2001:db8:abcd::1011","Subnet":"2001:db8:abcd::/64"}],"Driver":"default","Options":{"foo":"bar"}},"Ingress":false,"Internal":true,"Labels":{"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"},"Name":"isolated_nw","Options":{"com.docker.network.bridge.default_bridge":"true","com.docker.network.bridge.enable_icc":"true","com.docker.network.bridge.enable_ip_masquerade":"true","com.docker.network.bridge.host_binding_ipv4":"0.0.0.0","com.docker.network.bridge.name":"docker0","com.docker.network.driver.mtu":"1500"}}}}],"responses":{"201":{"description":"No error","schema":{"type":"object","title":"NetworkCreateResponse","properties":{"Id":{"description":"The ID of the created network.","type":"string"},"Warning":{"type":"string"}},"example":{"Id":"22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30","Warning":""}}},"403":{"description":"operation not supported for pre-defined networks","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"plugin not found","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/networks/prune":{"post":{"description":"Not supported on UCP.","produces":["application/json"],"tags":["Network"],"summary":"Delete unused networks","operationId":"NetworkPrune","parameters":[{"type":"string","description":"Filters to process on the prune list, encoded as JSON (a `map[string][]string`).\n\nAvailable filters:\n- `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. 
`10m`, `1h30m`) computed relative to the daemon machine’s time.\n- `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels.\n","name":"filters","in":"query"}],"responses":{"200":{"description":"No error","schema":{"type":"object","title":"NetworkPruneResponse","properties":{"NetworksDeleted":{"description":"Networks that were deleted","type":"array","items":{"type":"string"}}}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/networks/{id}":{"get":{"description":"Node-specific networks, like those with bridge and third-party drivers, are prefixed with the node name.","produces":["application/json"],"tags":["Network"],"summary":"Inspect a network","operationId":"NetworkInspect","parameters":[{"type":"string","description":"Network ID or name","name":"id","in":"path","required":true},{"type":"boolean","default":false,"description":"Detailed inspect output for troubleshooting","name":"verbose","in":"query"},{"type":"string","description":"Filter the network by scope (swarm, global, or local)","name":"scope","in":"query"}],"responses":{"200":{"description":"No error","schema":{"$ref":"#/definitions/Network"}},"404":{"description":"Network not found","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}},"delete":{"tags":["Network"],"summary":"Remove a network","operationId":"NetworkDelete","parameters":[{"type":"string","description":"Network ID or name","name":"id","in":"path","required":true}],"responses":{"204":{"description":"No error"},"403":{"description":"operation not supported for pre-defined networks","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such network","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server 
error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/networks/{id}/connect":{"post":{"consumes":["application/json"],"tags":["Network"],"summary":"Connect a container to a network","operationId":"NetworkConnect","parameters":[{"type":"string","description":"Network ID or name","name":"id","in":"path","required":true},{"name":"container","in":"body","required":true,"schema":{"type":"object","properties":{"Container":{"description":"The ID or name of the container to connect to the network.","type":"string"},"EndpointConfig":{"$ref":"#/definitions/EndpointSettings"}},"example":{"Container":"3613f73ba0e4","EndpointConfig":{"IPAMConfig":{"IPv4Address":"172.24.56.89","IPv6Address":"2001:db8::5689"}}}}}],"responses":{"200":{"description":"No error"},"403":{"description":"Operation not supported for swarm scoped networks","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"Network or container not found","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/networks/{id}/disconnect":{"post":{"consumes":["application/json"],"tags":["Network"],"summary":"Disconnect a container from a network","operationId":"NetworkDisconnect","parameters":[{"type":"string","description":"Network ID or name","name":"id","in":"path","required":true},{"name":"container","in":"body","required":true,"schema":{"type":"object","properties":{"Container":{"description":"The ID or name of the container to disconnect from the network.","type":"string"},"Force":{"description":"Force the container to disconnect from the network.","type":"boolean"}}}}],"responses":{"200":{"description":"No error"},"403":{"description":"Operation not supported for swarm scoped networks","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"Network or container not found","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server 
error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/nodes":{"get":{"description":"UCP augments the `Status.State` based on the status of the UCP components running on the current node.","tags":["Node"],"summary":"List nodes","operationId":"NodeList","parameters":[{"type":"string","description":"Filters to process on the nodes list, encoded as JSON (a `map[string][]string`).\n\nAvailable filters:\n- `id=`\n- `label=`\n- `membership=`(`accepted`|`pending`)`\n- `name=`\n- `role=`(`manager`|`worker`)`\n","name":"filters","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"array","items":{"$ref":"#/definitions/Node"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/nodes/{id}":{"get":{"description":"UCP augments the `Status.State` based on the status of the UCP components running on the current node.","tags":["Node"],"summary":"Inspect a node","operationId":"NodeInspect","parameters":[{"type":"string","description":"The ID or name of the node","name":"id","in":"path","required":true}],"responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/Node"}},"404":{"description":"no such node","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}},"delete":{"description":"If the current node is a manager, to keep the system healthy UCP attempts to unwind swarm components, like the KV store and auth store, from the node.","tags":["Node"],"summary":"Delete a node","operationId":"NodeDelete","parameters":[{"type":"string","description":"The ID or name of the node","name":"id","in":"path","required":true},{"type":"boolean","default":false,"description":"Force remove a node from the 
swarm","name":"force","in":"query"}],"responses":{"200":{"description":"no error"},"404":{"description":"no such node","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/nodes/{id}/update":{"post":{"tags":["Node"],"summary":"Update a node","operationId":"NodeUpdate","parameters":[{"type":"string","description":"The ID of the node","name":"id","in":"path","required":true},{"name":"body","in":"body","schema":{"$ref":"#/definitions/NodeSpec"}},{"type":"integer","format":"int64","description":"The version number of the node object being updated. This is required to avoid conflicting writes.","name":"version","in":"query","required":true}],"responses":{"200":{"description":"no error"},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such node","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins":{"get":{"description":"Returns information about installed plugins.\n\nNOTE: This API endpoint does not work through the UCP API. Admins can use this API directly on each individual node's Docker daemon.","produces":["application/json"],"tags":["Plugin"],"summary":"List plugins","operationId":"PluginList","parameters":[{"type":"string","description":"A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. 
Available filters:\n\n- `capability=`\n- `enable=|`\n","name":"filters","in":"query"}],"responses":{"200":{"description":"No error","schema":{"type":"array","items":{"$ref":"#/definitions/Plugin"}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins/create":{"post":{"description":"\n\nNOTE: This API endpoint does not work through the UCP API. Admins can use this API directly on each individual node's Docker daemon.","consumes":["application/x-tar"],"tags":["Plugin"],"summary":"Create a plugin","operationId":"PluginCreate","parameters":[{"type":"string","description":"The name of the plugin. The `:latest` tag is optional, and is the default if omitted.","name":"name","in":"query","required":true},{"description":"Path to tar containing plugin rootfs and manifest","name":"tarContext","in":"body","schema":{"type":"string","format":"binary"}}],"responses":{"204":{"description":"no error"},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins/pull":{"post":{"description":"Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable).\n\n\nNOTE: This API endpoint does not work through the UCP API. Admins can use this API directly on each individual node's Docker daemon.","produces":["application/json"],"tags":["Plugin"],"summary":"Install a plugin","operationId":"PluginPull","parameters":[{"type":"string","description":"Remote reference for plugin to install.\n\nThe `:latest` tag is optional, and is used as the default if omitted.\n","name":"remote","in":"query","required":true},{"type":"string","description":"Local name for the pulled plugin.\n\nThe `:latest` tag is optional, and is used as the default if omitted.\n","name":"name","in":"query"},{"type":"string","description":"A base64-encoded auth configuration to use when pulling a plugin from a registry. 
[See the authentication section for details.](#section/Authentication)","name":"X-Registry-Auth","in":"header"},{"name":"body","in":"body","schema":{"type":"array","items":{"description":"Describes a permission accepted by the user upon installing the plugin.","type":"object","properties":{"Description":{"type":"string"},"Name":{"type":"string"},"Value":{"type":"array","items":{"type":"string"}}}},"example":[{"Description":"","Name":"network","Value":["host"]},{"Description":"","Name":"mount","Value":["/data"]},{"Description":"","Name":"device","Value":["/dev/cpu_dma_latency"]}]}}],"responses":{"204":{"description":"no error"},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins/{name}":{"delete":{"description":"\n\nNOTE: This API endpoint does not work through the UCP API. Admins can use this API directly on each individual node's Docker daemon.","tags":["Plugin"],"summary":"Remove a plugin","operationId":"PluginDelete","parameters":[{"type":"string","description":"The name of the plugin. The `:latest` tag is optional, and is the default if omitted.","name":"name","in":"path","required":true},{"type":"boolean","default":false,"description":"Disable the plugin before removing. This may result in issues if the plugin is in use by a container.","name":"force","in":"query"}],"responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/Plugin"}},"404":{"description":"plugin is not installed","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins/{name}/disable":{"post":{"description":"\n\nNOTE: This API endpoint does not work through the UCP API. Admins can use this API directly on each individual node's Docker daemon.","tags":["Plugin"],"summary":"Disable a plugin","operationId":"PluginDisable","parameters":[{"type":"string","description":"The name of the plugin. 
The `:latest` tag is optional, and is the default if omitted.","name":"name","in":"path","required":true}],"responses":{"200":{"description":"no error"},"404":{"description":"plugin is not installed","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins/{name}/enable":{"post":{"description":"\n\nNOTE: This API endpoint does not work through the UCP API. Admins can use this API directly on each individual node's Docker daemon.","tags":["Plugin"],"summary":"Enable a plugin","operationId":"PluginEnable","parameters":[{"type":"string","description":"The name of the plugin. The `:latest` tag is optional, and is the default if omitted.","name":"name","in":"path","required":true},{"type":"integer","default":0,"description":"Set the HTTP client timeout (in seconds)","name":"timeout","in":"query"}],"responses":{"200":{"description":"no error"},"404":{"description":"plugin is not installed","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins/{name}/push":{"post":{"description":"Push a plugin to the registry.\n\n\nNOTE: This API endpoint does not work through the UCP API. Admins can use this API directly on each individual node's Docker daemon.","tags":["Plugin"],"summary":"Push a plugin","operationId":"PluginPush","parameters":[{"type":"string","description":"The name of the plugin. The `:latest` tag is optional, and is the default if omitted.","name":"name","in":"path","required":true}],"responses":{"200":{"description":"no error"},"404":{"description":"plugin not installed","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins/{name}/set":{"post":{"description":"\n\nNOTE: This API endpoint does not work through the UCP API. 
Admins can use this API directly on each individual node's Docker daemon.","consumes":["application/json"],"tags":["Plugin"],"summary":"Configure a plugin","operationId":"PluginSet","parameters":[{"type":"string","description":"The name of the plugin. The `:latest` tag is optional, and is the default if omitted.","name":"name","in":"path","required":true},{"name":"body","in":"body","schema":{"type":"array","items":{"type":"string"},"example":["DEBUG=1"]}}],"responses":{"204":{"description":"No error"},"404":{"description":"Plugin not installed","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/plugins/{name}/upgrade":{"post":{"description":"\n\nNOTE: This API endpoint does not work through the UCP API. Admins can use this API directly on each individual node's Docker daemon.","tags":["Plugin"],"summary":"Upgrade a plugin","operationId":"PluginUpgrade","parameters":[{"type":"string","description":"The name of the plugin. The `:latest` tag is optional, and is the default if omitted.","name":"name","in":"path","required":true},{"type":"string","description":"Remote reference to upgrade to.\n\nThe `:latest` tag is optional, and is used as the default if omitted.\n","name":"remote","in":"query","required":true},{"type":"string","description":"A base64-encoded auth configuration to use when pulling a plugin from a registry. 
[See the authentication section for details.](#section/Authentication)","name":"X-Registry-Auth","in":"header"},{"name":"body","in":"body","schema":{"type":"array","items":{"description":"Describes a permission accepted by the user upon installing the plugin.","type":"object","properties":{"Description":{"type":"string"},"Name":{"type":"string"},"Value":{"type":"array","items":{"type":"string"}}}},"example":[{"Description":"","Name":"network","Value":["host"]},{"Description":"","Name":"mount","Value":["/data"]},{"Description":"","Name":"device","Value":["/dev/cpu_dma_latency"]}]}}],"responses":{"204":{"description":"no error"},"404":{"description":"plugin not installed","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/roles":{"get":{"description":"Lists all roles in the system.","produces":["application/json"],"tags":["UCP"],"summary":"Lists all roles in the system.","operationId":"ListRoles","responses":{"200":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/role.Role"}}},"default":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/role.Role"}}}}},"post":{"description":"Creates a new custom role","consumes":["application/json"],"tags":["UCP"],"summary":"Creates a new custom role","operationId":"CreateRole","parameters":[{"type":"role.Role","name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/role.Role"}}],"responses":{"201":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/authz.RoleCreateResponse"}}}}}},"/roles/{role}":{"get":{"description":"Retrieves a single role by ID","produces":["application/json"],"tags":["UCP"],"summary":"Retrieves a single role by ID","operationId":"GetRole","parameters":[{"type":"string","default":"","description":"Name of the role to 
get","name":"role","in":"path","required":true}],"responses":{"200":{"description":"Success","schema":{"$ref":"#/definitions/role.Role"}},"default":{"description":"Success","schema":{"$ref":"#/definitions/role.Role"}}}},"delete":{"description":"Deletes a role by name","produces":["application/json"],"tags":["UCP"],"summary":"Deletes a role by name","operationId":"DeleteRole","parameters":[{"type":"string","default":"","description":"Name of the role to delete","name":"role","in":"path","required":true}],"responses":{"204":{"description":"Success"}}}},"/secrets":{"get":{"produces":["application/json"],"tags":["Secret"],"summary":"List secrets","operationId":"SecretList","parameters":[{"type":"string","description":"A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters:\n\n- `id=`\n- `label= or label==value`\n- `name=`\n- `names=`\n","name":"filters","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"array","items":{"$ref":"#/definitions/Secret"},"example":[{"CreatedAt":"2017-07-20T13:55:28.678958722Z","ID":"blt1owaxmitz71s9v5zh81zun","Spec":{"Driver":{"Name":"secret-bucket","Options":{"OptionA":"value for driver option A","OptionB":"value for driver option B"}},"Labels":{"some.label":"some.value"},"Name":"mysql-passwd"},"UpdatedAt":"2017-07-20T13:55:28.678958722Z","Version":{"Index":85}},{"CreatedAt":"2016-11-05T01:20:17.327670065Z","ID":"ktnbjxoalbkvbvedmg1urrz8h","Spec":{"Labels":{"foo":"bar"},"Name":"app-dev.crt"},"UpdatedAt":"2016-11-05T01:20:17.327670065Z","Version":{"Index":11}}]}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/secrets/create":{"post":{"consumes":["application/json"],"produces":["application/json"],"tags":["Secret"],"summary":"Create a 
secret","operationId":"SecretCreate","parameters":[{"name":"body","in":"body","schema":{"allOf":[{"$ref":"#/definitions/SecretSpec"},{"type":"object","example":{"Data":"VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==","Driver":{"Name":"secret-bucket","Options":{"OptionA":"value for driver option A","OptionB":"value for driver option B"}},"Labels":{"foo":"bar"},"Name":"app-key.crt"}}]}}],"responses":{"201":{"description":"no error","schema":{"$ref":"#/definitions/IdResponse"}},"409":{"description":"name conflicts with an existing object","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/secrets/{id}":{"get":{"produces":["application/json"],"tags":["Secret"],"summary":"Inspect a secret","operationId":"SecretInspect","parameters":[{"type":"string","description":"ID of the secret","name":"id","in":"path","required":true}],"responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/Secret"},"examples":{"application/json":{"CreatedAt":"2016-11-05T01:20:17.327670065Z","ID":"ktnbjxoalbkvbvedmg1urrz8h","Spec":{"Driver":{"Name":"secret-bucket","Options":{"OptionA":"value for driver option A","OptionB":"value for driver option B"}},"Labels":{"foo":"bar"},"Name":"app-dev.crt"},"UpdatedAt":"2016-11-05T01:20:17.327670065Z","Version":{"Index":11}}}},"404":{"description":"secret not found","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}},"delete":{"produces":["application/json"],"tags":["Secret"],"summary":"Delete a secret","operationId":"SecretDelete","parameters":[{"type":"string","description":"ID of the secret","name":"id","in":"path","required":true}],"responses":{"204":{"description":"no 
error"},"404":{"description":"secret not found","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/secrets/{id}/update":{"post":{"tags":["Secret"],"summary":"Update a Secret","operationId":"SecretUpdate","parameters":[{"type":"string","description":"The ID or name of the secret","name":"id","in":"path","required":true},{"description":"The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values.","name":"body","in":"body","schema":{"$ref":"#/definitions/SecretSpec"}},{"type":"integer","format":"int64","description":"The version number of the secret object being updated. This is required to avoid conflicting writes.","name":"version","in":"query","required":true}],"responses":{"200":{"description":"no error"},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such secret","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/services":{"get":{"tags":["Service"],"summary":"List services","operationId":"ServiceList","parameters":[{"type":"string","description":"A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. 
Available filters:\n\n- `id=`\n- `label=`\n- `mode=[\"replicated\"|\"global\"]`\n- `name=`\n","name":"filters","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"array","items":{"$ref":"#/definitions/Service"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/services/create":{"post":{"consumes":["application/json"],"produces":["application/json"],"tags":["Service"],"summary":"Create a service","operationId":"ServiceCreate","parameters":[{"name":"body","in":"body","required":true,"schema":{"allOf":[{"$ref":"#/definitions/ServiceSpec"},{"type":"object","example":{"EndpointSpec":{"Ports":[{"Protocol":"tcp","PublishedPort":8080,"TargetPort":80}]},"Labels":{"foo":"bar"},"Mode":{"Replicated":{"Replicas":4}},"Name":"web","RollbackConfig":{"Delay":1000000000,"FailureAction":"pause","MaxFailureRatio":0.15,"Monitor":15000000000,"Parallelism":1},"TaskTemplate":{"ContainerSpec":{"DNSConfig":{"Nameservers":["8.8.8.8"],"Options":["timeout:3"],"Search":["example.org"]},"Hosts":["10.10.10.10 host1","ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 
host2"],"Image":"nginx:alpine","Mounts":[{"ReadOnly":true,"Source":"web-data","Target":"/usr/share/nginx/html","Type":"volume","VolumeOptions":{"DriverConfig":{},"Labels":{"com.example.something":"something-value"}}}],"Secrets":[{"File":{"GID":"33","Mode":384,"Name":"www.example.org.key","UID":"33"},"SecretID":"fpjqlhnwb19zds35k8wn80lq9","SecretName":"example_org_domain_key"}],"User":"33"},"LogDriver":{"Name":"json-file","Options":{"max-file":"3","max-size":"10M"}},"Placement":{},"Resources":{"Limits":{"MemoryBytes":104857600},"Reservations":{}},"RestartPolicy":{"Condition":"on-failure","Delay":10000000000,"MaxAttempts":10}},"UpdateConfig":{"Delay":1000000000,"FailureAction":"pause","MaxFailureRatio":0.15,"Monitor":15000000000,"Parallelism":2}}}]}},{"type":"string","description":"A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)","name":"X-Registry-Auth","in":"header"}],"responses":{"201":{"description":"no error","schema":{"type":"object","title":"ServiceCreateResponse","properties":{"ID":{"description":"The ID of the created service.","type":"string"},"Warning":{"description":"Optional warning message","type":"string"}},"example":{"ID":"ak7w3gjqoa3kuz8xcpnyy0pvl","Warning":"unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"}}},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"403":{"description":"network is not eligible for services","schema":{"$ref":"#/definitions/ErrorResponse"}},"409":{"description":"name conflicts with an existing service","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/services/{id}":{"get":{"tags":["Service"],"summary":"Inspect a 
service","operationId":"ServiceInspect","parameters":[{"type":"string","description":"ID or name of service.","name":"id","in":"path","required":true},{"type":"boolean","default":false,"description":"Fill empty fields with default values.","name":"insertDefaults","in":"query"}],"responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/Service"}},"404":{"description":"no such service","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}},"delete":{"tags":["Service"],"summary":"Delete a service","operationId":"ServiceDelete","parameters":[{"type":"string","description":"ID or name of service.","name":"id","in":"path","required":true}],"responses":{"200":{"description":"no error"},"404":{"description":"no such service","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/services/{id}/logs":{"get":{"description":"Get `stdout` and `stderr` logs from a service.\n\n**Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers.\n","produces":["application/vnd.docker.raw-stream","application/json"],"tags":["Service"],"summary":"Get service logs","operationId":"ServiceLogs","parameters":[{"type":"string","description":"ID or name of the service","name":"id","in":"path","required":true},{"type":"boolean","default":false,"description":"Show service context and extra details provided to logs.","name":"details","in":"query"},{"type":"boolean","default":false,"description":"Return the logs as a stream.\n\nThis will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. 
For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).\n","name":"follow","in":"query"},{"type":"boolean","default":false,"description":"Return logs from `stdout`","name":"stdout","in":"query"},{"type":"boolean","default":false,"description":"Return logs from `stderr`","name":"stderr","in":"query"},{"type":"integer","default":0,"description":"Only return logs since this time, as a UNIX timestamp","name":"since","in":"query"},{"type":"boolean","default":false,"description":"Add timestamps to every log line","name":"timestamps","in":"query"},{"type":"string","default":"all","description":"Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines.","name":"tail","in":"query"}],"responses":{"101":{"description":"logs returned as a stream","schema":{"type":"string","format":"binary"}},"200":{"description":"logs returned as a string in response body","schema":{"type":"string"}},"404":{"description":"no such service","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such service: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/services/{id}/update":{"post":{"consumes":["application/json"],"produces":["application/json"],"tags":["Service"],"summary":"Update a service","operationId":"ServiceUpdate","parameters":[{"type":"string","description":"ID or name of 
service.","name":"id","in":"path","required":true},{"name":"body","in":"body","required":true,"schema":{"allOf":[{"$ref":"#/definitions/ServiceSpec"},{"type":"object","example":{"EndpointSpec":{"Mode":"vip"},"Mode":{"Replicated":{"Replicas":1}},"Name":"top","RollbackConfig":{"Delay":1000000000,"FailureAction":"pause","MaxFailureRatio":0.15,"Monitor":15000000000,"Parallelism":1},"TaskTemplate":{"ContainerSpec":{"Args":["top"],"Image":"busybox"},"ForceUpdate":0,"Placement":{},"Resources":{"Limits":{},"Reservations":{}},"RestartPolicy":{"Condition":"any","MaxAttempts":0}},"UpdateConfig":{"Delay":1000000000,"FailureAction":"pause","MaxFailureRatio":0.15,"Monitor":15000000000,"Parallelism":2}}}]}},{"type":"integer","description":"The version number of the service object being updated. This is required to avoid conflicting writes.","name":"version","in":"query","required":true},{"type":"string","default":"spec","description":"If the X-Registry-Auth header is not specified, this parameter indicates where to find registry authorization credentials. The valid values are `spec` and `previous-spec`.","name":"registryAuthFrom","in":"query"},{"type":"string","description":"Set to this parameter to `previous` to cause a server-side rollback to the previous service spec. The supplied spec will be ignored in this case.","name":"rollback","in":"query"},{"type":"string","description":"A base64-encoded auth configuration for pulling from private registries. 
[See the authentication section for details.](#section/Authentication)","name":"X-Registry-Auth","in":"header"}],"responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/ServiceUpdateResponse"}},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"404":{"description":"no such service","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/swarm":{"get":{"tags":["Swarm"],"summary":"Inspect swarm","operationId":"SwarmInspect","responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/Swarm"}},"404":{"description":"no such swarm","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/swarm/init":{"post":{"produces":["application/json","text/plain"],"tags":["Swarm"],"summary":"Initialize a new swarm","operationId":"SwarmInit","parameters":[{"name":"body","in":"body","required":true,"schema":{"type":"object","properties":{"AdvertiseAddr":{"description":"Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible.","type":"string"},"DataPathAddr":{"description":"Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`,\nor an interface, like `eth0`. 
If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr`\nis used.\n\nThe `DataPathAddr` specifies the address that global scope network drivers will publish towards other\nnodes in order to reach the containers running on this node. Using this parameter it is possible to\nseparate the container data traffic from the management traffic of the cluster.\n","type":"string"},"DefaultAddrPool":{"description":"Default Address Pool specifies default subnet pools for global scope networks.\n","type":"array","items":{"type":"string","example":["10.10.0.0/16","20.20.0.0/16"]}},"ForceNewCluster":{"description":"Force creation of a new swarm.","type":"boolean"},"ListenAddr":{"description":"Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used.","type":"string"},"Spec":{"$ref":"#/definitions/SwarmSpec"},"SubnetSize":{"description":"SubnetSize specifies the subnet size of the networks created from the default subnet pool\n","type":"integer","format":"uint32"}},"example":{"AdvertiseAddr":"192.168.1.1:2377","DefaultAddrPool":["10.10.0.0/8","20.20.0.0/8"],"ForceNewCluster":false,"ListenAddr":"0.0.0.0:2377","Spec":{"CAConfig":{},"Dispatcher":{},"EncryptionConfig":{"AutoLockManagers":false},"Orchestration":{},"Raft":{}},"SubnetSize":24}}}],"responses":{"200":{"description":"no error","schema":{"description":"The node ID","type":"string","example":"7v2t30z9blmxuhnyo6s4cpenp"}},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is already part of a 
swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/swarm/join":{"post":{"tags":["Swarm"],"summary":"Join an existing swarm","operationId":"SwarmJoin","parameters":[{"name":"body","in":"body","required":true,"schema":{"type":"object","properties":{"AdvertiseAddr":{"description":"Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible.","type":"string"},"DataPathAddr":{"description":"Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`,\nor an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr`\nis used.\n\nThe `DataPathAddr` specifies the address that global scope network drivers will publish towards other\nnodes in order to reach the containers running on this node. 
Using this parameter it is possible to\nseparate the container data traffic from the management traffic of the cluster.\n","type":"string"},"JoinToken":{"description":"Secret token for joining this swarm.","type":"string"},"ListenAddr":{"description":"Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP).","type":"string"},"RemoteAddrs":{"description":"Addresses of manager nodes already participating in the swarm.","type":"string"}},"example":{"AdvertiseAddr":"192.168.1.1:2377","JoinToken":"SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2","ListenAddr":"0.0.0.0:2377","RemoteAddrs":["node1:2377"]}}}],"responses":{"200":{"description":"no error"},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is already part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/swarm/leave":{"post":{"tags":["Swarm"],"summary":"Leave a swarm","operationId":"SwarmLeave","parameters":[{"type":"boolean","default":false,"description":"Force leave swarm, even if this is the last manager or that it will break the cluster.","name":"force","in":"query"}],"responses":{"200":{"description":"no error"},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/swarm/update":{"post":{"tags":["Swarm"],"summary":"Update a swarm","operationId":"SwarmUpdate","parameters":[{"name":"body","in":"body","required":true,"schema":{"$ref":"#/definitions/SwarmSpec"}},{"type":"integer","format":"int64","description":"The version number of the swarm object being updated. 
This is required to avoid conflicting writes.","name":"version","in":"query","required":true},{"type":"boolean","default":false,"description":"Rotate the worker join token.","name":"rotateWorkerToken","in":"query"},{"type":"boolean","default":false,"description":"Rotate the manager join token.","name":"rotateManagerToken","in":"query"},{"type":"boolean","default":false,"description":"Rotate the manager unlock key.","name":"rotateManagerUnlockKey","in":"query"}],"responses":{"200":{"description":"no error"},"400":{"description":"bad parameter","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/tasks":{"get":{"produces":["application/json"],"tags":["Task"],"summary":"List tasks","operationId":"TaskList","parameters":[{"type":"string","description":"A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
Available filters:\n\n- `desired-state=(running | shutdown | accepted)`\n- `id=`\n- `label=key` or `label=\"key=value\"`\n- `name=`\n- `node=`\n- `service=`\n","name":"filters","in":"query"}],"responses":{"200":{"description":"no error","schema":{"type":"array","items":{"$ref":"#/definitions/Task"},"example":[{"CreatedAt":"2016-06-07T21:07:31.171892745Z","DesiredState":"running","ID":"0kzzo1i0y4jz6027t0k7aezc7","NetworksAttachments":[{"Addresses":["10.255.0.10/16"],"Network":{"CreatedAt":"2016-06-07T20:31:11.912919752Z","DriverState":{"Name":"overlay","Options":{"com.docker.network.driver.overlay.vxlanid_list":"256"}},"ID":"4qvuz4ko70xaltuqbt8956gd1","IPAMOptions":{"Configs":[{"Gateway":"10.255.0.1","Subnet":"10.255.0.0/16"}],"Driver":{"Name":"default"}},"Spec":{"DriverConfiguration":{},"IPAMOptions":{"Configs":[{"Gateway":"10.255.0.1","Subnet":"10.255.0.0/16"}],"Driver":{}},"Labels":{"com.docker.swarm.internal":"true"},"Name":"ingress"},"UpdatedAt":"2016-06-07T21:07:29.955277358Z","Version":{"Index":18}}}],"NodeID":"60gvrl6tm78dmak4yl7srz94v","ServiceID":"9mnpnzenvg8p8tdbtq4wvbkcz","Slot":1,"Spec":{"ContainerSpec":{"Image":"redis"},"Placement":{},"Resources":{"Limits":{},"Reservations":{}},"RestartPolicy":{"Condition":"any","MaxAttempts":0}},"Status":{"ContainerStatus":{"ContainerID":"e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035","PID":677},"Message":"started","State":"running","Timestamp":"2016-06-07T21:07:31.290032978Z"},"UpdatedAt":"2016-06-07T21:07:31.376370513Z","Version":{"Index":71}},{"CreatedAt":"2016-06-07T21:07:30.019104782Z","DesiredState":"shutdown","ID":"1yljwbmlr8er2waf8orvqpwms","Name":"hopeful_cori","NetworksAttachments":[{"Addresses":["10.255.0.5/16"],"Network":{"CreatedAt":"2016-06-07T20:31:11.912919752Z","DriverState":{"Name":"overlay","Options":{"com.docker.network.driver.overlay.vxlanid_list":"256"}},"ID":"4qvuz4ko70xaltuqbt8956gd1","IPAMOptions":{"Configs":[{"Gateway":"10.255.0.1","Subnet":"10.255.0.0/16"}],"Driver":{"Name"
:"default"}},"Spec":{"DriverConfiguration":{},"IPAMOptions":{"Configs":[{"Gateway":"10.255.0.1","Subnet":"10.255.0.0/16"}],"Driver":{}},"Labels":{"com.docker.swarm.internal":"true"},"Name":"ingress"},"UpdatedAt":"2016-06-07T21:07:29.955277358Z","Version":{"Index":18}}}],"NodeID":"60gvrl6tm78dmak4yl7srz94v","ServiceID":"9mnpnzenvg8p8tdbtq4wvbkcz","Slot":1,"Spec":{"ContainerSpec":{"Image":"redis"},"Placement":{},"Resources":{"Limits":{},"Reservations":{}},"RestartPolicy":{"Condition":"any","MaxAttempts":0}},"Status":{"ContainerStatus":{"ContainerID":"1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213"},"Message":"shutdown","State":"shutdown","Timestamp":"2016-06-07T21:07:30.202183143Z"},"UpdatedAt":"2016-06-07T21:07:30.231958098Z","Version":{"Index":30}}]}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/tasks/{id}":{"get":{"produces":["application/json"],"tags":["Task"],"summary":"Inspect a task","operationId":"TaskInspect","parameters":[{"type":"string","description":"ID of the task","name":"id","in":"path","required":true}],"responses":{"200":{"description":"no error","schema":{"$ref":"#/definitions/Task"}},"404":{"description":"no such task","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/tasks/{id}/logs":{"get":{"description":"Get `stdout` and `stderr` logs from a task.\n\n**Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers.\n","produces":["application/vnd.docker.raw-stream","application/json"],"tags":["UCP"],"summary":"Get task logs","operationId":"TaskLogs","parameters":[{"type":"string","description":"ID of the 
task","name":"id","in":"path","required":true},{"type":"boolean","default":false,"description":"Show task context and extra details provided to logs.","name":"details","in":"query"},{"type":"boolean","default":false,"description":"Return the logs as a stream.\n\nThis will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).\n","name":"follow","in":"query"},{"type":"boolean","default":false,"description":"Return logs from `stdout`","name":"stdout","in":"query"},{"type":"boolean","default":false,"description":"Return logs from `stderr`","name":"stderr","in":"query"},{"type":"integer","default":0,"description":"Only return logs since this time, as a UNIX timestamp","name":"since","in":"query"},{"type":"boolean","default":false,"description":"Add timestamps to every log line","name":"timestamps","in":"query"},{"type":"string","default":"all","description":"Only return this number of log lines from the end of the logs. 
Specify as an integer or `all` to output all log lines.","name":"tail","in":"query"}],"responses":{"101":{"description":"logs returned as a stream","schema":{"type":"string","format":"binary"}},"200":{"description":"logs returned as a string in response body","schema":{"type":"string"}},"404":{"description":"no such task","schema":{"$ref":"#/definitions/ErrorResponse"},"examples":{"application/json":{"message":"No such task: c2ada9df5af8"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}},"503":{"description":"node is not part of a swarm","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/totalRole":{"get":{"description":"Returns a role with all operations that a user can perform against at least one collection in the system.","produces":["application/json"],"tags":["UCP"],"summary":"Returns a role with all operations that a user can perform against at least one collection in the system.","operationId":"TotalRole","responses":{"200":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/role.Role"}}},"default":{"description":"Success","schema":{"type":"array","items":{"$ref":"#/definitions/role.Role"}}}}}},"/version":{"get":{"description":"Returns the version of Docker that is running and various information about the system that Docker is running on.","produces":["application/json"],"tags":["System"],"summary":"Get version","operationId":"SystemVersion","responses":{"200":{"description":"no 
error","schema":{"type":"object","title":"SystemVersionResponse","properties":{"ApiVersion":{"type":"string"},"Arch":{"type":"string"},"BuildTime":{"type":"string"},"Components":{"type":"array","items":{"type":"object","required":["Name","Version"],"properties":{"Details":{"type":"object","x-nullable":true},"Name":{"type":"string"},"Version":{"type":"string","x-nullable":false}},"x-go-name":"ComponentVersion"}},"Experimental":{"type":"boolean"},"GitCommit":{"type":"string"},"GoVersion":{"type":"string"},"KernelVersion":{"type":"string"},"MinAPIVersion":{"type":"string"},"Os":{"type":"string"},"Platform":{"type":"object","required":["Name"],"properties":{"Name":{"type":"string"}}},"Version":{"type":"string"}}},"examples":{"application/json":{"ApiVersion":"1.27","Arch":"amd64","BuildTime":"2016-06-14T07:09:13.444803460+00:00","Experimental":true,"GitCommit":"deadbee","GoVersion":"go1.7.5","KernelVersion":"3.19.0-23-generic","MinAPIVersion":"1.12","Os":"linux","Version":"17.04.0"}}},"500":{"description":"server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/volumes":{"get":{"description":"If the name is prefixed with the name of a node, the request is sent to the specified node. Use the '/' character to distinguish the node name, like `testnode/testvolume`.","produces":["application/json"],"tags":["Volume"],"summary":"List volumes","operationId":"VolumeList","parameters":[{"type":"string","format":"json","description":"JSON encoded value of the filters (a `map[string][]string`) to\nprocess on the volumes list. Available filters:\n\n- `dangling=` When set to `true` (or `1`), returns all\n volumes that are not in use by a container. 
When set to `false`\n (or `0`), only volumes that are in use by one or more\n containers are returned.\n- `driver=` Matches volumes based on their driver.\n- `label=` or `label=:` Matches volumes based on\n the presence of a `label` alone or a `label` and a value.\n- `name=` Matches all or part of a volume name.\n","name":"filters","in":"query"}],"responses":{"200":{"description":"Summary volume data that matches the query","schema":{"description":"Volume list response","type":"object","title":"VolumeListResponse","required":["Volumes","Warnings"],"properties":{"Volumes":{"description":"List of volumes","type":"array","items":{"$ref":"#/definitions/Volume"},"x-nullable":false},"Warnings":{"description":"Warnings that occurred when fetching the list of volumes","type":"array","items":{"type":"string"},"x-nullable":false}}},"examples":{"application/json":{"Volumes":[{"CreatedAt":"2017-07-19T12:00:26Z","Driver":"local","Labels":{"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"},"Mountpoint":"/var/lib/docker/volumes/tardis","Name":"tardis","Options":{"device":"tmpfs","o":"size=100m,uid=1000","type":"tmpfs"},"Scope":"local"}],"Warnings":[]}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/volumes/create":{"post":{"consumes":["application/json"],"produces":["application/json"],"tags":["Volume"],"summary":"Create a volume","operationId":"VolumeCreate","parameters":[{"description":"Volume configuration","name":"volumeConfig","in":"body","required":true,"schema":{"description":"Volume configuration","type":"object","title":"VolumeConfig","properties":{"Driver":{"description":"Name of the volume driver to use.","type":"string","default":"local","x-nullable":false},"DriverOpts":{"description":"A mapping of driver options and values. 
These options are passed directly to the driver and are driver specific.","type":"object","additionalProperties":{"type":"string"}},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"Name":{"description":"The new volume's name. If not specified, Docker generates a name.","type":"string","x-nullable":false}},"example":{"Driver":"custom","Labels":{"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"},"Name":"tardis"}}}],"responses":{"201":{"description":"The volume was created successfully","schema":{"$ref":"#/definitions/Volume"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/volumes/prune":{"post":{"produces":["application/json"],"tags":["Volume"],"summary":"Delete unused volumes","operationId":"VolumePrune","parameters":[{"type":"string","description":"Filters to process on the prune list, encoded as JSON (a `map[string][]string`).\n\nAvailable filters:\n- `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels.\n","name":"filters","in":"query"}],"responses":{"200":{"description":"No error","schema":{"type":"object","title":"VolumePruneResponse","properties":{"SpaceReclaimed":{"description":"Disk space reclaimed in bytes","type":"integer","format":"int64"},"VolumesDeleted":{"description":"Volumes that were deleted","type":"array","items":{"type":"string"}}}}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}},"/volumes/{name}":{"get":{"description":"If the name is prefixed with the name of a node, the request is sent to the specified node. 
Use the '/' character to distinguish the node name, like `testnode/testvolume`.","produces":["application/json"],"tags":["Volume"],"summary":"Inspect a volume","operationId":"VolumeInspect","parameters":[{"type":"string","description":"Volume name or ID","name":"name","in":"path","required":true}],"responses":{"200":{"description":"No error","schema":{"$ref":"#/definitions/Volume"}},"404":{"description":"No such volume","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}},"delete":{"description":"Instruct the driver to remove the volume.","tags":["Volume"],"summary":"Remove a volume","operationId":"VolumeDelete","parameters":[{"type":"string","description":"Volume name or ID","name":"name","in":"path","required":true},{"type":"boolean","default":false,"description":"Force the removal of the volume","name":"force","in":"query"}],"responses":{"204":{"description":"The volume was removed"},"404":{"description":"No such volume or volume driver","schema":{"$ref":"#/definitions/ErrorResponse"}},"409":{"description":"Volume is in use and cannot be removed","schema":{"$ref":"#/definitions/ErrorResponse"}},"500":{"description":"Server error","schema":{"$ref":"#/definitions/ErrorResponse"}}}}}},"definitions":{"Address":{"description":"Address represents an IPv4 or IPv6 IP address.","type":"object","properties":{"Addr":{"description":"IP address.","type":"string"},"PrefixLen":{"description":"Mask length of the IP 
address.","type":"integer"}}},"AuthConfig":{"type":"object","properties":{"email":{"type":"string"},"password":{"type":"string"},"serveraddress":{"type":"string"},"username":{"type":"string"}},"example":{"password":"xxxx","serveraddress":"https://index.docker.io/v1/","username":"hannibal"}},"BuildInfo":{"type":"object","properties":{"aux":{"$ref":"#/definitions/ImageID"},"error":{"type":"string"},"errorDetail":{"$ref":"#/definitions/ErrorDetail"},"id":{"type":"string"},"progress":{"type":"string"},"progressDetail":{"$ref":"#/definitions/ProgressDetail"},"status":{"type":"string"},"stream":{"type":"string"}}},"ClusterInfo":{"description":"ClusterInfo represents information about the swarm as is returned by the\n\"/info\" endpoint. Join-tokens are not included.\n","type":"object","properties":{"CreatedAt":{"description":"Date and time at which the swarm was initialised in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n","type":"string","format":"dateTime","example":"2016-08-18T10:44:24.496525531Z"},"ID":{"description":"The ID of the swarm.","type":"string","example":"abajmipo7b4xz5ip2nrla6b11"},"RootRotationInProgress":{"description":"Whether there is currently a root CA rotation in progress for the swarm","type":"boolean","example":false},"Spec":{"$ref":"#/definitions/SwarmSpec"},"TLSInfo":{"$ref":"#/definitions/TLSInfo"},"UpdatedAt":{"description":"Date and time at which the swarm was last updated in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n","type":"string","format":"dateTime","example":"2017-08-09T07:09:37.632105588Z"},"Version":{"$ref":"#/definitions/ObjectVersion"}},"x-nullable":true},"Commit":{"description":"Commit holds the Git-commit (SHA1) that a binary was built from, as\nreported in the version-string of external tools, such as `containerd`,\nor `runC`.\n","type":"object","properties":{"Expected":{"description":"Commit ID of external tool expected by dockerd as set at build 
time.\n","type":"string","example":"2d41c047c83e09a6d61d464906feb2a2f3c52aa4"},"ID":{"description":"Actual commit ID of external tool.","type":"string","example":"cfb82a876ecc11b5ca0977d1733adbe58599088a"}}},"Config":{"type":"object","properties":{"CreatedAt":{"type":"string","format":"dateTime"},"ID":{"type":"string"},"Spec":{"$ref":"#/definitions/ConfigSpec"},"UpdatedAt":{"type":"string","format":"dateTime"},"Version":{"$ref":"#/definitions/ObjectVersion"}}},"ConfigSpec":{"type":"object","properties":{"Data":{"description":"Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2))\nconfig data.\n","type":"string"},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"Name":{"description":"User-defined name of the config.","type":"string"},"Templating":{"description":"Templating driver, if applicable\n\nTemplating controls whether and how to evaluate the config payload as\na template. If no driver is set, no templating is used.\n","$ref":"#/definitions/Driver"}}},"ContainerConfig":{"description":"Configuration for a container that is portable between hosts","type":"object","properties":{"ArgsEscaped":{"description":"Command is already escaped (Windows only)","type":"boolean"},"AttachStderr":{"description":"Whether to attach to `stderr`.","type":"boolean","default":true},"AttachStdin":{"description":"Whether to attach to `stdin`.","type":"boolean","default":false},"AttachStdout":{"description":"Whether to attach to `stdout`.","type":"boolean","default":true},"Cmd":{"description":"Command to run specified as a string or an array of strings.","type":"array","items":{"type":"string"}},"Domainname":{"description":"The domain name to use for the container.","type":"string"},"Entrypoint":{"description":"The entry point for the container as a string or an array of strings.\n\nIf the array consists of exactly one empty string (`[\"\"]`) then the entry point is reset to system 
default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).\n","type":"array","items":{"type":"string"}},"Env":{"description":"A list of environment variables to set inside the container in the form `[\"VAR=value\", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value.\n","type":"array","items":{"type":"string"}},"ExposedPorts":{"description":"An object mapping ports to an empty object in the form:\n\n`{\"/\": {}}`\n","type":"object","additionalProperties":{"type":"object","default":{},"enum":[{}]}},"Healthcheck":{"$ref":"#/definitions/HealthConfig"},"Hostname":{"description":"The hostname to use for the container, as a valid RFC 1123 hostname.","type":"string"},"Image":{"description":"The name of the image to use when creating the container","type":"string"},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"MacAddress":{"description":"MAC address of the container.","type":"string"},"NetworkDisabled":{"description":"Disable networking for the container.","type":"boolean"},"OnBuild":{"description":"`ONBUILD` metadata that were defined in the image's `Dockerfile`.","type":"array","items":{"type":"string"}},"OpenStdin":{"description":"Open `stdin`","type":"boolean","default":false},"Shell":{"description":"Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell.","type":"array","items":{"type":"string"}},"StdinOnce":{"description":"Close `stdin` after one attached client disconnects","type":"boolean","default":false},"StopSignal":{"description":"Signal to stop a container as a string or unsigned integer.","type":"string","default":"SIGTERM"},"StopTimeout":{"description":"Timeout to stop a container in seconds.","type":"integer","default":10},"Tty":{"description":"Attach standard streams to a TTY, including `stdin` if it is not closed.","type":"boolean","default":false},"User":{"description":"The user that 
commands are run as inside the container.","type":"string"},"Volumes":{"description":"An object mapping mount point paths inside the container to empty objects.","type":"object","additionalProperties":{"type":"object","default":{},"enum":[{}]}},"WorkingDir":{"description":"The working directory for commands to run in.","type":"string"}}},"ContainerSummary":{"type":"array","items":{"type":"object","properties":{"Command":{"description":"Command to run when starting the container","type":"string"},"Created":{"description":"When the container was created","type":"integer","format":"int64"},"HostConfig":{"type":"object","properties":{"NetworkMode":{"type":"string"}}},"Id":{"description":"The ID of this container","type":"string","x-go-name":"ID"},"Image":{"description":"The name of the image used when creating this container","type":"string"},"ImageID":{"description":"The ID of the image that this container was created from","type":"string"},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"Mounts":{"type":"array","items":{"$ref":"#/definitions/Mount"}},"Names":{"description":"The names that this container has been given","type":"array","items":{"type":"string"}},"NetworkSettings":{"description":"A summary of the container's network settings","type":"object","properties":{"Networks":{"type":"object","additionalProperties":{"$ref":"#/definitions/EndpointSettings"}}}},"Ports":{"description":"The ports exposed by this container","type":"array","items":{"$ref":"#/definitions/Port"}},"SizeRootFs":{"description":"The total size of all the files in this container","type":"integer","format":"int64"},"SizeRw":{"description":"The size of files that have been created or changed by this container","type":"integer","format":"int64"},"State":{"description":"The state of this container (e.g. `Exited`)","type":"string"},"Status":{"description":"Additional human-readable status of this container (e.g. 
`Exit 0`)","type":"string"}}}},"CreateImageInfo":{"type":"object","properties":{"error":{"type":"string"},"id":{"type":"string"},"progress":{"type":"string"},"progressDetail":{"$ref":"#/definitions/ProgressDetail"},"status":{"type":"string"}}},"DeviceMapping":{"description":"A device mapping between the host and container","type":"object","properties":{"CgroupPermissions":{"type":"string"},"PathInContainer":{"type":"string"},"PathOnHost":{"type":"string"}},"example":{"CgroupPermissions":"mrw","PathInContainer":"/dev/deviceName","PathOnHost":"/dev/deviceName"}},"Driver":{"description":"Driver represents a driver (network, logging, secrets).","type":"object","required":["Name"],"properties":{"Name":{"description":"Name of the driver.","type":"string","x-nullable":false,"example":"some-driver"},"Options":{"description":"Key/value map of driver-specific options.","type":"object","additionalProperties":{"type":"string"},"x-nullable":false,"example":{"OptionA":"value for driver-specific option A","OptionB":"value for driver-specific option B"}}}},"EndpointIPAMConfig":{"description":"EndpointIPAMConfig represents an endpoint's IPAM configuration.\n","type":"object","properties":{"IPv4Address":{"type":"string","example":"172.20.30.33"},"IPv6Address":{"type":"string","example":"2001:db8:abcd::3033"},"LinkLocalIPs":{"type":"array","items":{"type":"string"},"example":["169.254.34.68","fe80::3468"]}},"x-nullable":true},"EndpointPortConfig":{"type":"object","properties":{"Name":{"type":"string"},"Protocol":{"type":"string","enum":["tcp","udp","sctp"]},"PublishMode":{"description":"The mode in which port is published.\n\n


    \n\n- \"ingress\" makes the target port accessible on on every node,\n regardless of whether there is a task for the service running on\n that node or not.\n- \"host\" bypasses the routing mesh and publish the port directly on\n the swarm node where that service is running.\n","type":"string","default":"ingress","enum":["ingress","host"],"example":"ingress"},"PublishedPort":{"description":"The port on the swarm hosts.","type":"integer"},"TargetPort":{"description":"The port inside the container.","type":"integer"}}},"EndpointSettings":{"description":"Configuration for a network endpoint.","type":"object","properties":{"Aliases":{"type":"array","items":{"type":"string"},"example":["server_x","server_y"]},"DriverOpts":{"description":"DriverOpts is a mapping of driver options and values. These options\nare passed directly to the driver and are driver specific.\n","type":"object","additionalProperties":{"type":"string"},"x-nullable":true,"example":{"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"}},"EndpointID":{"description":"Unique ID for the service endpoint in a Sandbox.\n","type":"string","example":"b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"},"Gateway":{"description":"Gateway address for this network.\n","type":"string","example":"172.17.0.1"},"GlobalIPv6Address":{"description":"Global IPv6 address.\n","type":"string","example":"2001:db8::5689"},"GlobalIPv6PrefixLen":{"description":"Mask length of the global IPv6 address.\n","type":"integer","format":"int64","example":64},"IPAMConfig":{"$ref":"#/definitions/EndpointIPAMConfig"},"IPAddress":{"description":"IPv4 address.\n","type":"string","example":"172.17.0.4"},"IPPrefixLen":{"description":"Mask length of the IPv4 address.\n","type":"integer","example":16},"IPv6Gateway":{"description":"IPv6 gateway 
address.\n","type":"string","example":"2001:db8:2::100"},"Links":{"type":"array","items":{"type":"string"},"example":["container_1","container_2"]},"MacAddress":{"description":"MAC address for the endpoint on this network.\n","type":"string","example":"02:42:ac:11:00:04"},"NetworkID":{"description":"Unique ID of the network.\n","type":"string","example":"08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a"}}},"EndpointSpec":{"description":"Properties that can be configured to access and load balance a service.","type":"object","properties":{"Mode":{"description":"The mode of resolution to use for internal load balancing between tasks.","type":"string","default":"vip","enum":["vip","dnsrr"]},"Ports":{"description":"List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used.","type":"array","items":{"$ref":"#/definitions/EndpointPortConfig"}}}},"EngineDescription":{"description":"EngineDescription provides information about an 
engine.","type":"object","properties":{"EngineVersion":{"type":"string","example":"17.06.0"},"Labels":{"type":"object","additionalProperties":{"type":"string"},"example":{"foo":"bar"}},"Plugins":{"type":"array","items":{"type":"object","properties":{"Name":{"type":"string"},"Type":{"type":"string"}}},"example":[{"Name":"awslogs","Type":"Log"},{"Name":"fluentd","Type":"Log"},{"Name":"gcplogs","Type":"Log"},{"Name":"gelf","Type":"Log"},{"Name":"journald","Type":"Log"},{"Name":"json-file","Type":"Log"},{"Name":"logentries","Type":"Log"},{"Name":"splunk","Type":"Log"},{"Name":"syslog","Type":"Log"},{"Name":"bridge","Type":"Network"},{"Name":"host","Type":"Network"},{"Name":"ipvlan","Type":"Network"},{"Name":"macvlan","Type":"Network"},{"Name":"null","Type":"Network"},{"Name":"overlay","Type":"Network"},{"Name":"local","Type":"Volume"},{"Name":"localhost:5000/vieux/sshfs:latest","Type":"Volume"},{"Name":"vieux/sshfs:latest","Type":"Volume"}]}}},"ErrorDetail":{"type":"object","properties":{"code":{"type":"integer"},"message":{"type":"string"}}},"ErrorResponse":{"description":"Represents an error.","type":"object","required":["message"],"properties":{"message":{"description":"The error message.","type":"string","x-nullable":false}},"example":{"message":"Something went wrong."}},"GenericResources":{"description":"User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`)","type":"array","items":{"type":"object","properties":{"DiscreteResourceSpec":{"type":"object","properties":{"Kind":{"type":"string"},"Value":{"type":"integer","format":"int64"}}},"NamedResourceSpec":{"type":"object","properties":{"Kind":{"type":"string"},"Value":{"type":"string"}}}}},"example":[{"DiscreteResourceSpec":{"Kind":"SSD","Value":3}},{"NamedResourceSpec":{"Kind":"GPU","Value":"UUID1"}},{"NamedResourceSpec":{"Kind":"GPU","Value":"UUID2"}}]},"GraphDriverData":{"description":"Information about a container's graph 
driver.","type":"object","required":["Name","Data"],"properties":{"Data":{"type":"object","additionalProperties":{"type":"string"},"x-nullable":false},"Name":{"type":"string","x-nullable":false}}},"HealthConfig":{"description":"A test to perform to check that the container is healthy.","type":"object","properties":{"Interval":{"description":"The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit.","type":"integer"},"Retries":{"description":"The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit.","type":"integer"},"StartPeriod":{"description":"Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit.","type":"integer"},"Test":{"description":"The test to perform. Possible values are:\n\n- `[]` inherit healthcheck from image or parent image\n- `[\"NONE\"]` disable healthcheck\n- `[\"CMD\", args...]` exec arguments directly\n- `[\"CMD-SHELL\", command]` run command with system's default shell\n","type":"array","items":{"type":"string"}},"Timeout":{"description":"The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit.","type":"integer"}}},"HostConfig":{"description":"Container configuration that depends on the host we are running on","allOf":[{"$ref":"#/definitions/Resources"},{"type":"object","properties":{"AutoRemove":{"description":"Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set.","type":"boolean"},"Binds":{"description":"A list of volume bindings for this container. Each volume binding is a string in one of these forms:\n\n- `host-src:container-dest` to bind-mount a host path into the container. 
Both `host-src`, and `container-dest` must be an _absolute_ path.\n- `host-src:container-dest:ro` to make the bind mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path.\n- `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path.\n- `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path.\n","type":"array","items":{"type":"string"}},"CapAdd":{"description":"A list of kernel capabilities to add to the container.","type":"array","items":{"type":"string"}},"CapDrop":{"description":"A list of kernel capabilities to drop from the container.","type":"array","items":{"type":"string"}},"Cgroup":{"description":"Cgroup to use for the container.","type":"string"},"ConsoleSize":{"description":"Initial console size, as an `[height, width]` array. (Windows only)","type":"array","maxItems":2,"minItems":2,"items":{"type":"integer","minimum":0}},"ContainerIDFile":{"description":"Path to a file where the container ID is written","type":"string"},"Dns":{"description":"A list of DNS servers for the container to use.","type":"array","items":{"type":"string"}},"DnsOptions":{"description":"A list of DNS options.","type":"array","items":{"type":"string"}},"DnsSearch":{"description":"A list of DNS search domains.","type":"array","items":{"type":"string"}},"ExtraHosts":{"description":"A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `[\"hostname:IP\"]`.\n","type":"array","items":{"type":"string"}},"GroupAdd":{"description":"A list of additional groups that the container process will run as.","type":"array","items":{"type":"string"}},"IpcMode":{"description":"IPC sharing mode for the container. 
Possible values are:\n\n- `\"none\"`: own private IPC namespace, with /dev/shm not mounted\n- `\"private\"`: own private IPC namespace\n- `\"shareable\"`: own private IPC namespace, with a possibility to share it with other containers\n- `\"container:\"`: join another (shareable) container's IPC namespace\n- `\"host\"`: use the host system's IPC namespace\n\nIf not specified, daemon default is used, which can either be `\"private\"`\nor `\"shareable\"`, depending on daemon version and configuration.\n","type":"string"},"Isolation":{"description":"Isolation technology of the container. (Windows only)","type":"string","enum":["default","process","hyperv"]},"Links":{"description":"A list of links for the container in the form `container_name:alias`.","type":"array","items":{"type":"string"}},"LogConfig":{"description":"The logging configuration for this container","type":"object","properties":{"Config":{"type":"object","additionalProperties":{"type":"string"}},"Type":{"type":"string","enum":["json-file","syslog","journald","gelf","fluentd","awslogs","splunk","etwlogs","none"]}}},"MaskedPaths":{"description":"The list of paths to be masked inside the container (this overrides the default set of paths)","type":"array","items":{"type":"string"}},"Mounts":{"description":"Specification for mounts to be added to the container.","type":"array","items":{"$ref":"#/definitions/Mount"}},"NetworkMode":{"description":"Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken as a custom network's name to which this container should connect to.","type":"string"},"OomScoreAdj":{"description":"An integer value containing the score given to the container in order to tune OOM killer preferences.","type":"integer","example":500},"PidMode":{"description":"Set the PID (Process) Namespace mode for the container. 
It can be either:\n\n- `\"container:\"`: joins another container's PID namespace\n- `\"host\"`: use the host's PID namespace inside the container\n","type":"string"},"PortBindings":{"$ref":"#/definitions/PortMap"},"Privileged":{"description":"Gives the container full access to the host.","type":"boolean"},"PublishAllPorts":{"description":"Allocates an ephemeral host port for all of a container's\nexposed ports.\n\nPorts are de-allocated when the container stops and allocated when the container starts.\nThe allocated port might be changed when restarting the container.\n\nThe port is selected from the ephemeral port range that depends on the kernel.\nFor example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`.\n","type":"boolean"},"ReadonlyPaths":{"description":"The list of paths to be set as read-only inside the container (this overrides the default set of paths)","type":"array","items":{"type":"string"}},"ReadonlyRootfs":{"description":"Mount the container's root filesystem as read only.","type":"boolean"},"RestartPolicy":{"$ref":"#/definitions/RestartPolicy"},"Runtime":{"description":"Runtime to use with this container.","type":"string"},"SecurityOpt":{"description":"A list of string values to customize labels for MLS systems, such as SELinux.","type":"array","items":{"type":"string"}},"ShmSize":{"description":"Size of `/dev/shm` in bytes. If omitted, the system uses 64MB.","type":"integer","minimum":0},"StorageOpt":{"description":"Storage driver options for this container, in the form `{\"size\": \"120G\"}`.\n","type":"object","additionalProperties":{"type":"string"}},"Sysctls":{"description":"A list of kernel parameters (sysctls) to set in the container. For example: `{\"net.ipv4.ip_forward\": \"1\"}`\n","type":"object","additionalProperties":{"type":"string"}},"Tmpfs":{"description":"A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. 
For example: `{ \"/run\": \"rw,noexec,nosuid,size=65536k\" }`.\n","type":"object","additionalProperties":{"type":"string"}},"UTSMode":{"description":"UTS namespace to use for the container.","type":"string"},"UsernsMode":{"description":"Sets the usernamespace mode for the container when usernamespace remapping option is enabled.","type":"string"},"VolumeDriver":{"description":"Driver that this container uses to mount volumes.","type":"string"},"VolumesFrom":{"description":"A list of volumes to inherit from another container, specified in the form `[:]`.","type":"array","items":{"type":"string"}}}}]},"IPAM":{"type":"object","properties":{"Config":{"description":"List of IPAM configuration options, specified as a map: `{\"Subnet\": , \"IPRange\": , \"Gateway\": , \"AuxAddress\": }`","type":"array","items":{"type":"object","additionalProperties":{"type":"string"}}},"Driver":{"description":"Name of the IPAM driver to use.","type":"string","default":"default"},"Options":{"description":"Driver-specific options, specified as a map.","type":"array","items":{"type":"object","additionalProperties":{"type":"string"}}}}},"IdResponse":{"description":"Response to an API call that returns just an Id","type":"object","required":["Id"],"properties":{"Id":{"description":"The id of the newly created 
object.","type":"string","x-nullable":false}}},"Image":{"type":"object","required":["Id","Parent","Comment","Created","Container","DockerVersion","Author","Architecture","Os","Size","VirtualSize","GraphDriver","RootFS"],"properties":{"Architecture":{"type":"string","x-nullable":false},"Author":{"type":"string","x-nullable":false},"Comment":{"type":"string","x-nullable":false},"Config":{"$ref":"#/definitions/ContainerConfig"},"Container":{"type":"string","x-nullable":false},"ContainerConfig":{"$ref":"#/definitions/ContainerConfig"},"Created":{"type":"string","x-nullable":false},"DockerVersion":{"type":"string","x-nullable":false},"GraphDriver":{"$ref":"#/definitions/GraphDriverData"},"Id":{"type":"string","x-nullable":false},"Metadata":{"type":"object","properties":{"LastTagTime":{"type":"string","format":"dateTime"}}},"Os":{"type":"string","x-nullable":false},"OsVersion":{"type":"string"},"Parent":{"type":"string","x-nullable":false},"RepoDigests":{"type":"array","items":{"type":"string"}},"RepoTags":{"type":"array","items":{"type":"string"}},"RootFS":{"type":"object","required":["Type"],"properties":{"BaseLayer":{"type":"string"},"Layers":{"type":"array","items":{"type":"string"}},"Type":{"type":"string","x-nullable":false}}},"Size":{"type":"integer","format":"int64","x-nullable":false},"VirtualSize":{"type":"integer","format":"int64","x-nullable":false}}},"ImageDeleteResponseItem":{"type":"object","properties":{"Deleted":{"description":"The image ID of an image that was deleted","type":"string"},"Untagged":{"description":"The image ID of an image that was untagged","type":"string"}}},"ImageID":{"description":"Image ID or 
Digest","type":"object","properties":{"ID":{"type":"string"}},"example":{"ID":"sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c"}},"ImageSummary":{"type":"object","required":["Id","ParentId","RepoTags","RepoDigests","Created","Size","SharedSize","VirtualSize","Labels","Containers"],"properties":{"Containers":{"type":"integer","x-nullable":false},"Created":{"type":"integer","x-nullable":false},"Id":{"type":"string","x-nullable":false},"Labels":{"type":"object","additionalProperties":{"type":"string"},"x-nullable":false},"ParentId":{"type":"string","x-nullable":false},"RepoDigests":{"type":"array","items":{"type":"string"},"x-nullable":false},"RepoTags":{"type":"array","items":{"type":"string"},"x-nullable":false},"SharedSize":{"type":"integer","x-nullable":false},"Size":{"type":"integer","x-nullable":false},"VirtualSize":{"type":"integer","x-nullable":false}}},"IndexInfo":{"description":"IndexInfo contains information about a registry.","type":"object","properties":{"Mirrors":{"description":"List of mirrors, expressed as URIs.\n","type":"array","items":{"type":"string"},"example":["https://hub-mirror.corp.example.com:5000/","https://registry-2.docker.io/","https://registry-3.docker.io/"]},"Name":{"description":"Name of the registry, such as \"docker.io\".\n","type":"string","example":"docker.io"},"Official":{"description":"Indicates whether this is an official registry (i.e., Docker Hub / docker.io)\n","type":"boolean","example":true},"Secure":{"description":"Indicates if the registry is part of the list of insecure\nregistries.\n\nIf `false`, the registry is insecure. Insecure registries accept\nun-encrypted (HTTP) and/or untrusted (HTTPS with certificates from\nunknown CAs) communication.\n\n> **Warning**: Insecure registries can be useful when running a local\n> registry. However, because its use creates security vulnerabilities\n> it should ONLY be enabled for testing purposes. 
For increased\n> security, users should add their CA to their system's list of\n> trusted CAs instead of enabling this option.\n","type":"boolean","example":true}},"x-nullable":true},"JoinTokens":{"description":"JoinTokens contains the tokens workers and managers need to join the swarm.\n","type":"object","properties":{"Manager":{"description":"The token managers can use to join the swarm.\n","type":"string","example":"SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"},"Worker":{"description":"The token workers can use to join the swarm.\n","type":"string","example":"SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx"}}},"LocalNodeState":{"description":"Current local status of this node.","type":"string","default":"","enum":["","inactive","pending","active","error","locked"],"example":"active"},"ManagerStatus":{"description":"ManagerStatus represents the status of a manager.\n\nIt provides the current status of a node's manager component, if the node\nis a manager.\n","type":"object","properties":{"Addr":{"description":"The IP address and port at which the manager is reachable.\n","type":"string","example":"10.0.0.46:2377"},"Leader":{"type":"boolean","default":false,"example":true},"Reachability":{"$ref":"#/definitions/Reachability"}},"x-nullable":true},"Mount":{"type":"object","properties":{"BindOptions":{"description":"Optional configuration for the `bind` type.","type":"object","properties":{"Propagation":{"description":"A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`.","type":"string","enum":["private","rprivate","shared","rshared","slave","rslave"]}}},"Consistency":{"description":"The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`.","type":"string"},"ReadOnly":{"description":"Whether the mount should be read-only.","type":"boolean"},"Source":{"description":"Mount source (e.g. 
a volume name, a host path).","type":"string"},"Target":{"description":"Container path.","type":"string"},"TmpfsOptions":{"description":"Optional configuration for the `tmpfs` type.","type":"object","properties":{"Mode":{"description":"The permission mode for the tmpfs mount in an integer.","type":"integer"},"SizeBytes":{"description":"The size for the tmpfs mount in bytes.","type":"integer","format":"int64"}}},"Type":{"description":"The mount type. Available types:\n\n- `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container.\n- `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.\n- `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.\n","type":"string","enum":["bind","volume","tmpfs"]},"VolumeOptions":{"description":"Optional configuration for the `volume` type.","type":"object","properties":{"DriverConfig":{"description":"Map of driver specific options","type":"object","properties":{"Name":{"description":"Name of the driver to use to create the volume.","type":"string"},"Options":{"description":"key/value map of driver specific options.","type":"object","additionalProperties":{"type":"string"}}}},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"NoCopy":{"description":"Populate volume with data from the target.","type":"boolean","default":false}}}}},"MountPoint":{"description":"A mount point inside a 
container","type":"object","properties":{"Destination":{"type":"string"},"Driver":{"type":"string"},"Mode":{"type":"string"},"Name":{"type":"string"},"Propagation":{"type":"string"},"RW":{"type":"boolean"},"Source":{"type":"string"},"Type":{"type":"string"}}},"Network":{"type":"object","properties":{"Attachable":{"type":"boolean"},"Containers":{"type":"object","additionalProperties":{"$ref":"#/definitions/NetworkContainer"}},"Created":{"type":"string","format":"dateTime"},"Driver":{"type":"string"},"EnableIPv6":{"type":"boolean"},"IPAM":{"$ref":"#/definitions/IPAM"},"Id":{"type":"string"},"Ingress":{"type":"boolean"},"Internal":{"type":"boolean"},"Labels":{"type":"object","additionalProperties":{"type":"string"}},"Name":{"type":"string"},"Options":{"type":"object","additionalProperties":{"type":"string"}},"Scope":{"type":"string"}},"example":{"Attachable":false,"Containers":{"19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c":{"EndpointID":"628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a","IPv4Address":"172.19.0.2/16","IPv6Address":"","MacAddress":"02:42:ac:13:00:02","Name":"test"}},"Created":"2016-10-19T04:33:30.360899459Z","Driver":"bridge","EnableIPv6":false,"IPAM":{"Config":[{"Gateway":"172.19.0.1","Subnet":"172.19.0.0/16"}],"Driver":"default","Options":{"foo":"bar"}},"Id":"7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99","Ingress":false,"Internal":false,"Labels":{"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"},"Name":"net01","Options":{"com.docker.network.bridge.default_bridge":"true","com.docker.network.bridge.enable_icc":"true","com.docker.network.bridge.enable_ip_masquerade":"true","com.docker.network.bridge.host_binding_ipv4":"0.0.0.0","com.docker.network.bridge.name":"docker0","com.docker.network.driver.mtu":"1500"},"Scope":"local"}},"NetworkContainer":{"type":"object","properties":{"EndpointID":{"type":"string"},"IPv4Address":{"type":"string"},"IPv6Address":{"type":"
string"},"MacAddress":{"type":"string"},"Name":{"type":"string"}}},"NetworkSettings":{"description":"NetworkSettings exposes the network settings in the API","type":"object","properties":{"Bridge":{"description":"Name of the network'a bridge (for example, `docker0`).","type":"string","example":"docker0"},"EndpointID":{"description":"EndpointID uniquely represents a service endpoint in a Sandbox.\n\n


    \n\n> **Deprecated**: This field is only propagated when attached to the\n> default \"bridge\" network. Use the information from the \"bridge\"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n","type":"string","example":"b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"},"Gateway":{"description":"Gateway address for the default \"bridge\" network.\n\n


    \n\n> **Deprecated**: This field is only propagated when attached to the\n> default \"bridge\" network. Use the information from the \"bridge\"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n","type":"string","example":"172.17.0.1"},"GlobalIPv6Address":{"description":"Global IPv6 address for the default \"bridge\" network.\n\n


    \n\n> **Deprecated**: This field is only propagated when attached to the\n> default \"bridge\" network. Use the information from the \"bridge\"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n","type":"string","example":"2001:db8::5689"},"GlobalIPv6PrefixLen":{"description":"Mask length of the global IPv6 address.\n\n


    \n\n> **Deprecated**: This field is only propagated when attached to the\n> default \"bridge\" network. Use the information from the \"bridge\"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n","type":"integer","example":64},"HairpinMode":{"description":"Indicates if hairpin NAT should be enabled on the virtual interface.\n","type":"boolean","example":false},"IPAddress":{"description":"IPv4 address for the default \"bridge\" network.\n\n


    \n\n> **Deprecated**: This field is only propagated when attached to the\n> default \"bridge\" network. Use the information from the \"bridge\"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n","type":"string","example":"172.17.0.4"},"IPPrefixLen":{"description":"Mask length of the IPv4 address.\n\n


    \n\n> **Deprecated**: This field is only propagated when attached to the\n> default \"bridge\" network. Use the information from the \"bridge\"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n","type":"integer","example":16},"IPv6Gateway":{"description":"IPv6 gateway address for this network.\n\n


    \n\n> **Deprecated**: This field is only propagated when attached to the\n> default \"bridge\" network. Use the information from the \"bridge\"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n","type":"string","example":"2001:db8:2::100"},"LinkLocalIPv6Address":{"description":"IPv6 unicast address using the link-local prefix.","type":"string","example":"fe80::42:acff:fe11:1"},"LinkLocalIPv6PrefixLen":{"description":"Prefix length of the IPv6 unicast address.","type":"integer","example":"64"},"MacAddress":{"description":"MAC address for the container on the default \"bridge\" network.\n\n


    \n\n> **Deprecated**: This field is only propagated when attached to the\n> default \"bridge\" network. Use the information from the \"bridge\"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n","type":"string","example":"02:42:ac:11:00:04"},"Networks":{"description":"Information about all networks that the container is connected to.\n","type":"object","additionalProperties":{"$ref":"#/definitions/EndpointSettings"}},"Ports":{"$ref":"#/definitions/PortMap"},"SandboxID":{"description":"SandboxID uniquely represents a container's network stack.","type":"string","example":"9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3"},"SandboxKey":{"description":"SandboxKey identifies the sandbox","type":"string","example":"/var/run/docker/netns/8ab54b426c38"},"SecondaryIPAddresses":{"type":"array","items":{"$ref":"#/definitions/Address"},"x-nullable":true},"SecondaryIPv6Addresses":{"type":"array","items":{"$ref":"#/definitions/Address"},"x-nullable":true}}},"Node":{"type":"object","properties":{"CreatedAt":{"description":"Date and time at which the node was added to the swarm in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n","type":"string","format":"dateTime","example":"2016-08-18T10:44:24.496525531Z"},"Description":{"$ref":"#/definitions/NodeDescription"},"ID":{"type":"string","example":"24ifsmvkjbyhk"},"ManagerStatus":{"$ref":"#/definitions/ManagerStatus"},"Spec":{"$ref":"#/definitions/NodeSpec"},"Status":{"$ref":"#/definitions/NodeStatus"},"UpdatedAt":{"description":"Date and time at which the node was last updated in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n","type":"string","format":"dateTime","example":"2017-08-09T07:09:37.632105588Z"},"Version":{"$ref":"#/definitions/ObjectVersion"}}},"NodeDescription":{"description":"NodeDescription encapsulates the 
properties of the Node as reported by the\nagent.\n","type":"object","properties":{"Engine":{"$ref":"#/definitions/EngineDescription"},"Hostname":{"type":"string","example":"bf3067039e47"},"Platform":{"$ref":"#/definitions/Platform"},"Resources":{"$ref":"#/definitions/ResourceObject"},"TLSInfo":{"$ref":"#/definitions/TLSInfo"}}},"NodeSpec":{"type":"object","properties":{"Availability":{"description":"Availability of the node.","type":"string","enum":["active","pause","drain"],"example":"active"},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"Name":{"description":"Name for the node.","type":"string","example":"my-node"},"Role":{"description":"Role of the node.","type":"string","enum":["worker","manager"],"example":"manager"}},"example":{"Availability":"active","Labels":{"foo":"bar"},"Name":"node-name","Role":"manager"}},"NodeState":{"description":"NodeState represents the state of a node.","type":"string","enum":["unknown","down","ready","disconnected"],"example":"ready"},"NodeStatus":{"description":"NodeStatus represents the status of a node.\n\nIt provides the current status of the node, as seen by the manager.\n","type":"object","properties":{"Addr":{"description":"IP address of the node.","type":"string","example":"172.17.0.2"},"Message":{"type":"string","example":""},"State":{"$ref":"#/definitions/NodeState"}}},"ObjectVersion":{"description":"The version number of the object such as node, service, etc. This is needed to avoid conflicting writes.\nThe client must send the version number along with the modified specification when updating these objects.\nThis approach ensures safe concurrency and determinism in that the change on the object\nmay not be applied if the version number has changed from the last read. 
In other words,\nif two update requests specify the same base version, only one of the requests can succeed.\nAs a result, two separate update requests that happen at the same time will not\nunintentionally overwrite each other.\n","type":"object","properties":{"Index":{"type":"integer","format":"uint64","example":373531}}},"PeerNode":{"description":"Represents a peer-node in the swarm","properties":{"Addr":{"description":"IP address and ports at which this node can be reached.\n","type":"string"},"NodeID":{"description":"Unique identifier of for this node in the swarm.","type":"string"}}},"Platform":{"description":"Platform represents the platform (Arch/OS).\n","type":"object","properties":{"Architecture":{"description":"Architecture represents the hardware architecture (for example,\n`x86_64`).\n","type":"string","example":"x86_64"},"OS":{"description":"OS represents the Operating System (for example, `linux` or `windows`).\n","type":"string","example":"linux"}}},"Plugin":{"description":"A plugin for the Engine API","type":"object","required":["Settings","Enabled","Config","Name"],"properties":{"Config":{"description":"The config of a plugin.","type":"object","required":["Description","Documentation","Interface","Entrypoint","WorkDir","Network","Linux","PidHost","PropagatedMount","IpcHost","Mounts","Env","Args"],"properties":{"Args":{"type":"object","required":["Name","Description","Settable","Value"],"properties":{"Description":{"type":"string","x-nullable":false,"example":"command line arguments"},"Name":{"type":"string","x-nullable":false,"example":"args"},"Settable":{"type":"array","items":{"type":"string"}},"Value":{"type":"array","items":{"type":"string"}}},"x-nullable":false},"Description":{"type":"string","x-nullable":false,"example":"A sample volume plugin for Docker"},"DockerVersion":{"description":"Docker Version used to create the 
plugin","type":"string","x-nullable":false,"example":"17.06.0-ce"},"Documentation":{"type":"string","x-nullable":false,"example":"https://docs.docker.com/engine/extend/plugins/"},"Entrypoint":{"type":"array","items":{"type":"string"},"example":["/usr/bin/sample-volume-plugin","/data"]},"Env":{"type":"array","items":{"$ref":"#/definitions/PluginEnv"},"example":[{"Description":"If set, prints debug messages","Name":"DEBUG","Settable":null,"Value":"0"}]},"Interface":{"description":"The interface between Docker and the plugin","type":"object","required":["Types","Socket"],"properties":{"ProtocolScheme":{"description":"Protocol to use for clients connecting to the plugin.","type":"string","enum":["","moby.plugins.http/v1"],"example":"some.protocol/v1.0"},"Socket":{"type":"string","x-nullable":false,"example":"plugins.sock"},"Types":{"type":"array","items":{"$ref":"#/definitions/PluginInterfaceType"},"example":["docker.volumedriver/1.0"]}},"x-nullable":false},"IpcHost":{"type":"boolean","x-nullable":false,"example":false},"Linux":{"type":"object","required":["Capabilities","AllowAllDevices","Devices"],"properties":{"AllowAllDevices":{"type":"boolean","x-nullable":false,"example":false},"Capabilities":{"type":"array","items":{"type":"string"},"example":["CAP_SYS_ADMIN","CAP_SYSLOG"]},"Devices":{"type":"array","items":{"$ref":"#/definitions/PluginDevice"}}},"x-nullable":false},"Mounts":{"type":"array","items":{"$ref":"#/definitions/PluginMount"}},"Network":{"type":"object","required":["Type"],"properties":{"Type":{"type":"string","x-nullable":false,"example":"host"}},"x-nullable":false},"PidHost":{"type":"boolean","x-nullable":false,"example":false},"PropagatedMount":{"type":"string","x-nullable":false,"example":"/mnt/volumes"},"User":{"type":"object","properties":{"GID":{"type":"integer","format":"uint32","example":1000},"UID":{"type":"integer","format":"uint32","example":1000}},"x-nullable":false},"WorkDir":{"type":"string","x-nullable":false,"example":"/bin/"},"rootfs":{
"type":"object","properties":{"diff_ids":{"type":"array","items":{"type":"string"},"example":["sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887","sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"]},"type":{"type":"string","example":"layers"}}}},"x-nullable":false},"Enabled":{"description":"True if the plugin is running. False if the plugin is not running, only installed.","type":"boolean","x-nullable":false,"example":true},"Id":{"type":"string","example":"5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078"},"Name":{"type":"string","x-nullable":false,"example":"tiborvass/sample-volume-plugin"},"PluginReference":{"description":"plugin remote reference used to push/pull the plugin","type":"string","x-nullable":false,"example":"localhost:5000/tiborvass/sample-volume-plugin:latest"},"Settings":{"description":"Settings that can be modified by users.","type":"object","required":["Args","Devices","Env","Mounts"],"properties":{"Args":{"type":"array","items":{"type":"string"}},"Devices":{"type":"array","items":{"$ref":"#/definitions/PluginDevice"}},"Env":{"type":"array","items":{"type":"string"},"example":["DEBUG=0"]},"Mounts":{"type":"array","items":{"$ref":"#/definitions/PluginMount"}}},"x-nullable":false}}},"PluginDevice":{"type":"object","required":["Name","Description","Settable","Path"],"properties":{"Description":{"type":"string","x-nullable":false},"Name":{"type":"string","x-nullable":false},"Path":{"type":"string","example":"/dev/fuse"},"Settable":{"type":"array","items":{"type":"string"}}},"x-nullable":false},"PluginEnv":{"type":"object","required":["Name","Description","Settable","Value"],"properties":{"Description":{"type":"string","x-nullable":false},"Name":{"type":"string","x-nullable":false},"Settable":{"type":"array","items":{"type":"string"}},"Value":{"type":"string"}},"x-nullable":false},"PluginInterfaceType":{"type":"object","required":["Prefix","Capability","Version"],"properties":{"Capability":{"
type":"string","x-nullable":false},"Prefix":{"type":"string","x-nullable":false},"Version":{"type":"string","x-nullable":false}},"x-nullable":false},"PluginMount":{"type":"object","required":["Name","Description","Settable","Source","Destination","Type","Options"],"properties":{"Description":{"type":"string","x-nullable":false,"example":"This is a mount that's used by the plugin."},"Destination":{"type":"string","x-nullable":false,"example":"/mnt/state"},"Name":{"type":"string","x-nullable":false,"example":"some-mount"},"Options":{"type":"array","items":{"type":"string"},"example":["rbind","rw"]},"Settable":{"type":"array","items":{"type":"string"}},"Source":{"type":"string","example":"/var/lib/docker/plugins/"},"Type":{"type":"string","x-nullable":false,"example":"bind"}},"x-nullable":false},"PluginsInfo":{"description":"Available plugins per type.\n\n


    \n\n> **Note**: Only unmanaged (V1) plugins are included in this list.\n> V1 plugins are \"lazily\" loaded, and are not returned in this list\n> if there is no resource using the plugin.\n","type":"object","properties":{"Authorization":{"description":"Names of available authorization plugins.","type":"array","items":{"type":"string"},"example":["img-authz-plugin","hbm"]},"Log":{"description":"Names of available logging-drivers, and logging-driver plugins.","type":"array","items":{"type":"string"},"example":["awslogs","fluentd","gcplogs","gelf","journald","json-file","logentries","splunk","syslog"]},"Network":{"description":"Names of available network-drivers, and network-driver plugins.","type":"array","items":{"type":"string"},"example":["bridge","host","ipvlan","macvlan","null","overlay"]},"Volume":{"description":"Names of available volume-drivers, and network-driver plugins.","type":"array","items":{"type":"string"},"example":["local"]}}},"Port":{"description":"An open port on a container","type":"object","required":["PrivatePort","Type"],"properties":{"IP":{"description":"Host IP address that the container's port is mapped to","type":"string","format":"ip-address"},"PrivatePort":{"description":"Port on the container","type":"integer","format":"uint16","x-nullable":false},"PublicPort":{"description":"Port exposed on the host","type":"integer","format":"uint16"},"Type":{"type":"string","enum":["tcp","udp","sctp"],"x-nullable":false}},"example":{"PrivatePort":8080,"PublicPort":80,"Type":"tcp"}},"PortBinding":{"description":"PortBinding represents a binding between a host IP address and a host\nport.\n","type":"object","properties":{"HostIp":{"description":"Host IP address that the container's port is mapped to.","type":"string","example":"127.0.0.1"},"HostPort":{"description":"Host port number that the container's port is mapped to.","type":"string","example":"4443"}},"x-nullable":true},"PortMap":{"description":"PortMap describes the mapping of container ports 
to host ports, using the\ncontainer's port-number and protocol as key in the format `/`,\nfor example, `80/udp`.\n\nIf a container's port is mapped for multiple protocols, separate entries\nare added to the mapping table.\n","type":"object","additionalProperties":{"type":"array","items":{"$ref":"#/definitions/PortBinding"}},"example":{"2377/tcp":null,"443/tcp":[{"HostIp":"127.0.0.1","HostPort":"4443"}],"53/udp":[{"HostIp":"0.0.0.0","HostPort":"53"}],"80/tcp":[{"HostIp":"0.0.0.0","HostPort":"80"},{"HostIp":"0.0.0.0","HostPort":"8080"}],"80/udp":[{"HostIp":"0.0.0.0","HostPort":"80"}]}},"ProcessConfig":{"type":"object","properties":{"arguments":{"type":"array","items":{"type":"string"}},"entrypoint":{"type":"string"},"privileged":{"type":"boolean"},"tty":{"type":"boolean"},"user":{"type":"string"}}},"ProgressDetail":{"type":"object","properties":{"current":{"type":"integer"},"total":{"type":"integer"}}},"PushImageInfo":{"type":"object","properties":{"error":{"type":"string"},"progress":{"type":"string"},"progressDetail":{"$ref":"#/definitions/ProgressDetail"},"status":{"type":"string"}}},"Reachability":{"description":"Reachability represents the reachability of a node.","type":"string","enum":["unknown","unreachable","reachable"],"example":"reachable"},"RegistryServiceConfig":{"description":"RegistryServiceConfig stores daemon registry services configuration.\n","type":"object","properties":{"AllowNondistributableArtifactsCIDRs":{"description":"List of IP ranges to which nondistributable artifacts can be pushed,\nusing the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632).\n\nSome images (for example, Windows base images) contain artifacts\nwhose distribution is restricted by license. 
When these images are\npushed to a registry, restricted artifacts are not included.\n\nThis configuration override this behavior, and enables the daemon to\npush nondistributable artifacts to all registries whose resolved IP\naddress is within the subnet described by the CIDR syntax.\n\nThis option is useful when pushing images containing\nnondistributable artifacts to a registry on an air-gapped network so\nhosts on that network can pull the images without connecting to\nanother server.\n\n> **Warning**: Nondistributable artifacts typically have restrictions\n> on how and where they can be distributed and shared. Only use this\n> feature to push artifacts to private registries and ensure that you\n> are in compliance with any terms that cover redistributing\n> nondistributable artifacts.\n","type":"array","items":{"type":"string"},"example":["::1/128","127.0.0.0/8"]},"AllowNondistributableArtifactsHostnames":{"description":"List of registry hostnames to which nondistributable artifacts can be\npushed, using the format `[:]` or `[:]`.\n\nSome images (for example, Windows base images) contain artifacts\nwhose distribution is restricted by license. When these images are\npushed to a registry, restricted artifacts are not included.\n\nThis configuration override this behavior for the specified\nregistries.\n\nThis option is useful when pushing images containing\nnondistributable artifacts to a registry on an air-gapped network so\nhosts on that network can pull the images without connecting to\nanother server.\n\n> **Warning**: Nondistributable artifacts typically have restrictions\n> on how and where they can be distributed and shared. 
Only use this\n> feature to push artifacts to private registries and ensure that you\n> are in compliance with any terms that cover redistributing\n> nondistributable artifacts.\n","type":"array","items":{"type":"string"},"example":["registry.internal.corp.example.com:3000","[2001:db8:a0b:12f0::1]:443"]},"IndexConfigs":{"type":"object","additionalProperties":{"$ref":"#/definitions/IndexInfo"},"example":{"127.0.0.1:5000":{"Mirrors":[],"Name":"127.0.0.1:5000","Official":false,"Secure":false},"[2001:db8:a0b:12f0::1]:80":{"Mirrors":[],"Name":"[2001:db8:a0b:12f0::1]:80","Official":false,"Secure":false},"docker.io":{"Mirrors":["https://hub-mirror.corp.example.com:5000/"],"Name":"docker.io","Official":true,"Secure":true},"registry.internal.corp.example.com:3000":{"Mirrors":[],"Name":"registry.internal.corp.example.com:3000","Official":false,"Secure":false}}},"InsecureRegistryCIDRs":{"description":"List of IP ranges of insecure registries, using the CIDR syntax\n([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries\naccept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates\nfrom unknown CAs) communication.\n\nBy default, local registries (`127.0.0.0/8`) are configured as\ninsecure. All other registries are secure. Communicating with an\ninsecure registry is not possible if the daemon assumes that registry\nis secure.\n\nThis configuration overrides this behavior, allowing insecure communication with\nregistries whose resolved IP address is within the subnet described by\nthe CIDR syntax.\n\nRegistries can also be marked insecure by hostname. Those registries\nare listed under `IndexConfigs` and have their `Secure` field set to\n`false`.\n\n> **Warning**: Using this option can be useful when running a local\n> registry, but introduces security vulnerabilities. This option\n> should therefore ONLY be used for testing purposes. 
For increased\n> security, users should add their CA to their system's list of trusted\n> CAs instead of enabling this option.\n","type":"array","items":{"type":"string"},"example":["::1/128","127.0.0.0/8"]},"Mirrors":{"description":"List of registry URLs that act as a mirror for the official\n(`docker.io`) registry.\n","type":"array","items":{"type":"string"},"example":["https://hub-mirror.corp.example.com:5000/","https://[2001:db8:a0b:12f0::1]/"]}},"x-nullable":true},"ResourceObject":{"description":"An object describing the resources which can be advertised by a node and requested by a task","type":"object","properties":{"GenericResources":{"$ref":"#/definitions/GenericResources"},"MemoryBytes":{"type":"integer","format":"int64","example":8272408576},"NanoCPUs":{"type":"integer","format":"int64","example":4000000000}}},"Resources":{"description":"A container's resources (cgroups config, ulimits, etc)","type":"object","properties":{"BlkioDeviceReadBps":{"description":"Limit read rate (bytes per second) from a device, in the form `[{\"Path\": \"device_path\", \"Rate\": rate}]`.\n","type":"array","items":{"$ref":"#/definitions/ThrottleDevice"}},"BlkioDeviceReadIOps":{"description":"Limit read rate (IO per second) from a device, in the form `[{\"Path\": \"device_path\", \"Rate\": rate}]`.\n","type":"array","items":{"$ref":"#/definitions/ThrottleDevice"}},"BlkioDeviceWriteBps":{"description":"Limit write rate (bytes per second) to a device, in the form `[{\"Path\": \"device_path\", \"Rate\": rate}]`.\n","type":"array","items":{"$ref":"#/definitions/ThrottleDevice"}},"BlkioDeviceWriteIOps":{"description":"Limit write rate (IO per second) to a device, in the form `[{\"Path\": \"device_path\", \"Rate\": rate}]`.\n","type":"array","items":{"$ref":"#/definitions/ThrottleDevice"}},"BlkioWeight":{"description":"Block IO weight (relative weight).","type":"integer","maximum":1000,"minimum":0},"BlkioWeightDevice":{"description":"Block IO weight (relative device weight) in the 
form `[{\"Path\": \"device_path\", \"Weight\": weight}]`.\n","type":"array","items":{"type":"object","properties":{"Path":{"type":"string"},"Weight":{"type":"integer","minimum":0}}}},"CgroupParent":{"description":"Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.","type":"string"},"CpuCount":{"description":"The number of usable CPUs (Windows only).\n\nOn Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last.\n","type":"integer","format":"int64"},"CpuPercent":{"description":"The usable percentage of the available CPUs (Windows only).\n\nOn Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last.\n","type":"integer","format":"int64"},"CpuPeriod":{"description":"The length of a CPU period in microseconds.","type":"integer","format":"int64"},"CpuQuota":{"description":"Microseconds of CPU time that the container can get in a CPU period.","type":"integer","format":"int64"},"CpuRealtimePeriod":{"description":"The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks.","type":"integer","format":"int64"},"CpuRealtimeRuntime":{"description":"The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks.","type":"integer","format":"int64"},"CpuShares":{"description":"An integer value representing this container's relative CPU weight versus other containers.","type":"integer"},"CpusetCpus":{"description":"CPUs in which to allow execution (e.g., `0-3`, `0,1`)","type":"string","example":"0-3"},"CpusetMems":{"description":"Memory nodes (MEMs) in which to allow execution (0-3, 0,1). 
Only effective on NUMA systems.","type":"string"},"DeviceCgroupRules":{"description":"a list of cgroup rules to apply to the container","type":"array","items":{"type":"string","example":"c 13:* rwm"}},"Devices":{"description":"A list of devices to add to the container.","type":"array","items":{"$ref":"#/definitions/DeviceMapping"}},"DiskQuota":{"description":"Disk limit (in bytes).","type":"integer","format":"int64"},"IOMaximumBandwidth":{"description":"Maximum IO in bytes per second for the container system drive (Windows only)","type":"integer","format":"int64"},"IOMaximumIOps":{"description":"Maximum IOps for the container system drive (Windows only)","type":"integer","format":"int64"},"Init":{"description":"Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used.","type":"boolean","x-nullable":true},"KernelMemory":{"description":"Kernel memory limit in bytes.","type":"integer","format":"int64"},"Memory":{"description":"Memory limit in bytes.","type":"integer","format":"int64","default":0},"MemoryReservation":{"description":"Memory soft limit in bytes.","type":"integer","format":"int64"},"MemorySwap":{"description":"Total memory limit (memory + swap). Set as `-1` to enable unlimited swap.","type":"integer","format":"int64"},"MemorySwappiness":{"description":"Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.","type":"integer","format":"int64","maximum":100,"minimum":0},"NanoCPUs":{"description":"CPU quota in units of 10^-9 CPUs.","type":"integer","format":"int64"},"OomKillDisable":{"description":"Disable OOM Killer for the container.","type":"boolean"},"PidsLimit":{"description":"Tune a container's pids limit. Set -1 for unlimited.","type":"integer","format":"int64"},"Ulimits":{"description":"A list of resource limits to set in the container. 
For example: `{\"Name\": \"nofile\", \"Soft\": 1024, \"Hard\": 2048}`\"\n","type":"array","items":{"type":"object","properties":{"Hard":{"description":"Hard limit","type":"integer"},"Name":{"description":"Name of ulimit","type":"string"},"Soft":{"description":"Soft limit","type":"integer"}}}}}},"RestartPolicy":{"description":"The behavior to apply when the container exits. The default is not to restart.\n\nAn ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server.\n","type":"object","properties":{"MaximumRetryCount":{"description":"If `on-failure` is used, the number of times to retry before giving up","type":"integer"},"Name":{"description":"- Empty string means not to restart\n- `always` Always restart\n- `unless-stopped` Restart always except when the user has manually stopped the container\n- `on-failure` Restart only when the container exit code is non-zero\n","type":"string","enum":["","always","unless-stopped","on-failure"]}}},"Runtime":{"description":"Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec)\nruntime.\n\nThe runtime is invoked by the daemon via the `containerd` daemon. 
OCI\nruntimes act as an interface to the Linux kernel namespaces, cgroups,\nand SELinux.\n","type":"object","properties":{"path":{"description":"Name and, optional, path, of the OCI executable binary.\n\nIf the path is omitted, the daemon searches the host's `$PATH` for the\nbinary and uses the first result.\n","type":"string","example":"/usr/local/bin/my-oci-runtime"},"runtimeArgs":{"description":"List of command-line arguments to pass to the runtime when invoked.\n","type":"array","items":{"type":"string"},"x-nullable":true,"example":["--debug","--systemd-cgroup=false"]}}},"Secret":{"type":"object","properties":{"CreatedAt":{"type":"string","format":"dateTime","example":"2017-07-20T13:55:28.678958722Z"},"ID":{"type":"string","example":"blt1owaxmitz71s9v5zh81zun"},"Spec":{"$ref":"#/definitions/SecretSpec"},"UpdatedAt":{"type":"string","format":"dateTime","example":"2017-07-20T13:55:28.678958722Z"},"Version":{"$ref":"#/definitions/ObjectVersion"}}},"SecretSpec":{"type":"object","properties":{"Data":{"description":"Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2))\ndata to store as secret.\n\nThis field is only used to _create_ a secret, and is not returned by\nother endpoints.\n","type":"string","example":""},"Driver":{"description":"Name of the secrets driver used to fetch the secret's value from an external secret store","$ref":"#/definitions/Driver"},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"},"example":{"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"}},"Name":{"description":"User-defined name of the secret.","type":"string"},"Templating":{"description":"Templating driver, if applicable\n\nTemplating controls whether and how to evaluate the config payload as\na template. 
If no driver is set, no templating is used.\n","$ref":"#/definitions/Driver"}}},"Service":{"type":"object","properties":{"CreatedAt":{"type":"string","format":"dateTime"},"Endpoint":{"type":"object","properties":{"Ports":{"type":"array","items":{"$ref":"#/definitions/EndpointPortConfig"}},"Spec":{"$ref":"#/definitions/EndpointSpec"},"VirtualIPs":{"type":"array","items":{"type":"object","properties":{"Addr":{"type":"string"},"NetworkID":{"type":"string"}}}}}},"ID":{"type":"string"},"Spec":{"$ref":"#/definitions/ServiceSpec"},"UpdateStatus":{"description":"The status of a service update.","type":"object","properties":{"CompletedAt":{"type":"string","format":"dateTime"},"Message":{"type":"string"},"StartedAt":{"type":"string","format":"dateTime"},"State":{"type":"string","enum":["updating","paused","completed"]}}},"UpdatedAt":{"type":"string","format":"dateTime"},"Version":{"$ref":"#/definitions/ObjectVersion"}},"example":{"CreatedAt":"2016-06-07T21:05:51.880065305Z","Endpoint":{"Ports":[{"Protocol":"tcp","PublishedPort":30001,"TargetPort":6379}],"Spec":{"Mode":"vip","Ports":[{"Protocol":"tcp","PublishedPort":30001,"TargetPort":6379}]},"VirtualIPs":[{"Addr":"10.255.0.2/16","NetworkID":"4qvuz4ko70xaltuqbt8956gd1"},{"Addr":"10.255.0.3/16","NetworkID":"4qvuz4ko70xaltuqbt8956gd1"}]},"ID":"9mnpnzenvg8p8tdbtq4wvbkcz","Spec":{"EndpointSpec":{"Mode":"vip","Ports":[{"Protocol":"tcp","PublishedPort":30001,"TargetPort":6379}]},"Mode":{"Replicated":{"Replicas":1}},"Name":"hopeful_cori","RollbackConfig":{"Delay":1000000000,"FailureAction":"pause","MaxFailureRatio":0.15,"Monitor":15000000000,"Parallelism":1},"TaskTemplate":{"ContainerSpec":{"Image":"redis"},"ForceUpdate":0,"Placement":{},"Resources":{"Limits":{},"Reservations":{}},"RestartPolicy":{"Condition":"any","MaxAttempts":0}},"UpdateConfig":{"Delay":1000000000,"FailureAction":"pause","MaxFailureRatio":0.15,"Monitor":15000000000,"Parallelism":1}},"UpdatedAt":"2016-06-07T21:07:29.962229872Z","Version":{"Index":19}}},"ServiceSpe
c":{"description":"User modifiable configuration for a service.","properties":{"EndpointSpec":{"$ref":"#/definitions/EndpointSpec"},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"Mode":{"description":"Scheduling mode for the service.","type":"object","properties":{"Global":{"type":"object"},"Replicated":{"type":"object","properties":{"Replicas":{"type":"integer","format":"int64"}}}}},"Name":{"description":"Name of the service.","type":"string"},"Networks":{"description":"Array of network names or IDs to attach the service to.","type":"array","items":{"type":"object","properties":{"Aliases":{"type":"array","items":{"type":"string"}},"Target":{"type":"string"}}}},"RollbackConfig":{"description":"Specification for the rollback strategy of the service.","type":"object","properties":{"Delay":{"description":"Amount of time between rollback iterations, in nanoseconds.","type":"integer","format":"int64"},"FailureAction":{"description":"Action to take if an rolled back task fails to run, or stops running during the rollback.","type":"string","enum":["continue","pause"]},"MaxFailureRatio":{"description":"The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1.","type":"number","default":0},"Monitor":{"description":"Amount of time to monitor each rolled back task for failures, in nanoseconds.","type":"integer","format":"int64"},"Order":{"description":"The order of operations when rolling back a task. 
Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down.","type":"string","enum":["stop-first","start-first"]},"Parallelism":{"description":"Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism).","type":"integer","format":"int64"}}},"TaskTemplate":{"$ref":"#/definitions/TaskSpec"},"UpdateConfig":{"description":"Specification for the update strategy of the service.","type":"object","properties":{"Delay":{"description":"Amount of time between updates, in nanoseconds.","type":"integer","format":"int64"},"FailureAction":{"description":"Action to take if an updated task fails to run, or stops running during the update.","type":"string","enum":["continue","pause","rollback"]},"MaxFailureRatio":{"description":"The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1.","type":"number","default":0},"Monitor":{"description":"Amount of time to monitor each updated task for failures, in nanoseconds.","type":"integer","format":"int64"},"Order":{"description":"The order of operations when rolling out an updated task. 
Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down.","type":"string","enum":["stop-first","start-first"]},"Parallelism":{"description":"Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism).","type":"integer","format":"int64"}}}}},"ServiceUpdateResponse":{"type":"object","properties":{"Warnings":{"description":"Optional warning messages","type":"array","items":{"type":"string"}}},"example":{"Warning":"unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"}},"Swarm":{"type":"object","allOf":[{"$ref":"#/definitions/ClusterInfo"},{"type":"object","properties":{"JoinTokens":{"$ref":"#/definitions/JoinTokens"}}}]},"SwarmInfo":{"description":"Represents generic information about swarm.\n","type":"object","properties":{"Cluster":{"$ref":"#/definitions/ClusterInfo"},"ControlAvailable":{"type":"boolean","default":false,"example":true},"Error":{"type":"string","default":""},"LocalNodeState":{"$ref":"#/definitions/LocalNodeState"},"Managers":{"description":"Total number of managers in the swarm.","type":"integer","x-nullable":true,"example":3},"NodeAddr":{"description":"IP address at which this node can be reached by other nodes in the\nswarm.\n","type":"string","default":"","example":"10.0.0.46"},"NodeID":{"description":"Unique identifier for this node in the swarm.","type":"string","default":"","example":"k67qz4598weg5unwwffg6z1m1"},"Nodes":{"description":"Total number of nodes in the swarm.","type":"integer","x-nullable":true,"example":4},"RemoteManagers":{"description":"List of IDs and addresses of other managers in the 
swarm.\n","type":"array","items":{"$ref":"#/definitions/PeerNode"},"x-nullable":true,"example":[{"Addr":"10.0.0.158:2377","NodeID":"71izy0goik036k48jg985xnds"},{"Addr":"10.0.0.159:2377","NodeID":"79y6h1o4gv8n120drcprv5nmc"},{"Addr":"10.0.0.46:2377","NodeID":"k67qz4598weg5unwwffg6z1m1"}]}}},"SwarmSpec":{"description":"User modifiable swarm configuration.","type":"object","properties":{"CAConfig":{"description":"CA configuration.","type":"object","properties":{"ExternalCAs":{"description":"Configuration for forwarding signing requests to an external certificate authority.","type":"array","items":{"type":"object","properties":{"CACert":{"description":"The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided).","type":"string"},"Options":{"description":"An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver.","type":"object","additionalProperties":{"type":"string"}},"Protocol":{"description":"Protocol for communication with the external CA (currently only `cfssl` is supported).","type":"string","default":"cfssl","enum":["cfssl"]},"URL":{"description":"URL where certificate signing requests should be sent.","type":"string"}}}},"ForceRotate":{"description":"An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey`","type":"integer","format":"uint64"},"NodeCertExpiry":{"description":"The duration node certificates are issued for.","type":"integer","format":"int64","example":7776000000000000},"SigningCACert":{"description":"The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.","type":"string"},"SigningCAKey":{"description":"The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.","type":"string"}},"x-nullable":true},"Dispatcher":{"description":"Dispatcher 
configuration.","type":"object","properties":{"HeartbeatPeriod":{"description":"The delay for an agent to send a heartbeat to the dispatcher.","type":"integer","format":"int64","example":5000000000}},"x-nullable":true},"EncryptionConfig":{"description":"Parameters related to encryption-at-rest.","type":"object","properties":{"AutoLockManagers":{"description":"If set, generate a key and use it to lock data stored on the managers.","type":"boolean","example":false}}},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"},"example":{"com.example.corp.department":"engineering","com.example.corp.type":"production"}},"Name":{"description":"Name of the swarm.","type":"string","example":"default"},"Orchestration":{"description":"Orchestration configuration.","type":"object","properties":{"TaskHistoryRetentionLimit":{"description":"The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks.","type":"integer","format":"int64","example":10}},"x-nullable":true},"Raft":{"description":"Raft configuration.","type":"object","properties":{"ElectionTick":{"description":"The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`.\n\nA tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed.\n","type":"integer","example":3},"HeartbeatTick":{"description":"The number of ticks between heartbeats. 
Every HeartbeatTick ticks, the leader will send a heartbeat to the followers.\n\nA tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed.\n","type":"integer","example":1},"KeepOldSnapshots":{"description":"The number of snapshots to keep beyond the current snapshot.","type":"integer","format":"uint64"},"LogEntriesForSlowFollowers":{"description":"The number of log entries to keep around to sync up slow followers after a snapshot is created.","type":"integer","format":"uint64","example":500},"SnapshotInterval":{"description":"The number of log entries between snapshots.","type":"integer","format":"uint64","example":10000}}},"TaskDefaults":{"description":"Defaults for creating tasks in this cluster.","type":"object","properties":{"LogDriver":{"description":"The log driver to use for tasks created in the orchestrator if\nunspecified by a service.\n\nUpdating this value only affects new tasks. Existing tasks continue\nto use their previously configured log driver until recreated.\n","type":"object","properties":{"Name":{"description":"The log driver to use as a default for new tasks.\n","type":"string","example":"json-file"},"Options":{"description":"Driver-specific options for the selected log driver, specified\nas key/value pairs.\n","type":"object","additionalProperties":{"type":"string"},"example":{"max-file":"10","max-size":"100m"}}}}}}}},"SystemInfo":{"type":"object","properties":{"Architecture":{"description":"Hardware architecture of the host, as returned by the Go runtime\n(`GOARCH`).\n\nA full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment).\n","type":"string","example":"x86_64"},"BridgeNfIp6tables":{"description":"Indicates if `bridge-nf-call-ip6tables` is available on the host.","type":"boolean","example":true},"BridgeNfIptables":{"description":"Indicates if `bridge-nf-call-iptables` is available on the 
host.","type":"boolean","example":true},"CPUSet":{"description":"Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host.\n\nSee [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt)\n","type":"boolean","example":true},"CPUShares":{"description":"Indicates if CPU Shares limiting is supported by the host.","type":"boolean","example":true},"CgroupDriver":{"description":"The driver to use for managing cgroups.\n","type":"string","default":"cgroupfs","enum":["cgroupfs","systemd"],"example":"cgroupfs"},"ClusterAdvertise":{"description":"The network endpoint that the Engine advertises for the purpose of\nnode discovery. ClusterAdvertise is a `host:port` combination on which\nthe daemon is reachable by other hosts.\n\n


    \n\n> **Note**: This field is only propagated when using standalone Swarm\n> mode, and overlay networking using an external k/v store. Overlay\n> networks with Swarm mode enabled use the built-in raft store, and\n> this field will be empty.\n","type":"string","example":"node5.corp.example.com:8000"},"ClusterStore":{"description":"URL of the distributed storage backend.\n\n\nThe storage backend is used for multihost networking (to store\nnetwork and endpoint information) and by the node discovery mechanism.\n\n


    \n\n> **Note**: This field is only propagated when using standalone Swarm\n> mode, and overlay networking using an external k/v store. Overlay\n> networks with Swarm mode enabled use the built-in raft store, and\n> this field will be empty.\n","type":"string","example":"consul://consul.corp.example.com:8600/some/path"},"ContainerdCommit":{"$ref":"#/definitions/Commit"},"Containers":{"description":"Total number of containers on the host.","type":"integer","example":14},"ContainersPaused":{"description":"Number of containers with status `\"paused\"`.\n","type":"integer","example":1},"ContainersRunning":{"description":"Number of containers with status `\"running\"`.\n","type":"integer","example":3},"ContainersStopped":{"description":"Number of containers with status `\"stopped\"`.\n","type":"integer","example":10},"CpuCfsPeriod":{"description":"Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host.","type":"boolean","example":true},"CpuCfsQuota":{"description":"Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host.","type":"boolean","example":true},"Debug":{"description":"Indicates if the daemon is running in debug-mode / with debug-level logging enabled.","type":"boolean","example":true},"DefaultRuntime":{"description":"Name of the default OCI runtime that is used when starting containers.\n\nThe default can be overridden per-container at create time.\n","type":"string","default":"runc","example":"runc"},"DockerRootDir":{"description":"Root directory of persistent Docker state.\n\nDefaults to `/var/lib/docker` on Linux, and `C:\\ProgramData\\docker`\non Windows.\n","type":"string","example":"/var/lib/docker"},"Driver":{"description":"Name of the storage driver in use.","type":"string","example":"overlay2"},"DriverStatus":{"description":"Information specific to the storage driver, provided as\n\"label\" / \"value\" pairs.\n\nThis information is provided by the storage driver, and formatted\nin a way consistent 
with the output of `docker info` on the command\nline.\n\n


    \n\n> **Note**: The information returned in this field, including the\n> formatting of values and labels, should not be considered stable,\n> and may change without notice.\n","type":"array","items":{"type":"array","items":{"type":"string"}},"example":[["Backing Filesystem","extfs"],["Supports d_type","true"],["Native Overlay Diff","true"]]},"ExperimentalBuild":{"description":"Indicates if experimental features are enabled on the daemon.\n","type":"boolean","example":true},"GenericResources":{"$ref":"#/definitions/GenericResources"},"HttpProxy":{"description":"HTTP-proxy configured for the daemon. This value is obtained from the\n[`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.\n\nContainers do not automatically inherit this configuration.\n","type":"string","example":"http://user:pass@proxy.corp.example.com:8080"},"HttpsProxy":{"description":"HTTPS-proxy configured for the daemon. This value is obtained from the\n[`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.\n\nContainers do not automatically inherit this configuration.\n","type":"string","example":"https://user:pass@proxy.corp.example.com:4443"},"ID":{"description":"Unique identifier of the daemon.\n\n


    \n\n> **Note**: The format of the ID itself is not part of the API, and\n> should not be considered stable.\n","type":"string","example":"7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS"},"IPv4Forwarding":{"description":"Indicates IPv4 forwarding is enabled.","type":"boolean","example":true},"Images":{"description":"Total number of images on the host.\n\nBoth _tagged_ and _untagged_ (dangling) images are counted.\n","type":"integer","example":508},"IndexServerAddress":{"description":"Address / URL of the index server that is used for image search,\nand as a default for user authentication for Docker Hub and Docker Cloud.\n","type":"string","default":"https://index.docker.io/v1/","example":"https://index.docker.io/v1/"},"InitBinary":{"description":"Name and, optional, path of the `docker-init` binary.\n\nIf the path is omitted, the daemon searches the host's `$PATH` for the\nbinary and uses the first result.\n","type":"string","example":"docker-init"},"InitCommit":{"$ref":"#/definitions/Commit"},"Isolation":{"description":"Represents the isolation technology to use as a default for containers.\nThe supported values are platform-specific.\n\nIf no isolation value is specified on daemon start, on Windows client,\nthe default is `hyperv`, and on Windows server, the default is `process`.\n\nThis option is currently not used on other platforms.\n","type":"string","default":"default","enum":["default","hyperv","process"]},"KernelMemory":{"description":"Indicates if the host has kernel memory limit support enabled.","type":"boolean","example":true},"KernelVersion":{"description":"Kernel version of the host.\n\nOn Linux, this information obtained from `uname`. 
On Windows this\ninformation is queried from the HKEY_LOCAL_MACHINE\\\\SOFTWARE\\\\Microsoft\\\\Windows NT\\\\CurrentVersion\\\\\nregistry value, for example _\"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)\"_.\n","type":"string","example":"4.9.38-moby"},"Labels":{"description":"User-defined labels (key/value metadata) as set on the daemon.\n\n


    \n\n> **Note**: When part of a Swarm, nodes can both have _daemon_ labels,\n> set through the daemon configuration, and _node_ labels, set from a\n> manager node in the Swarm. Node labels are not included in this\n> field. Node labels can be retrieved using the `/nodes/(id)` endpoint\n> on a manager node in the Swarm.\n","type":"array","items":{"type":"string"},"example":["storage=ssd","production"]},"LiveRestoreEnabled":{"description":"Indicates if live restore is enabled.\n\nIf enabled, containers are kept running when the daemon is shutdown\nor upon daemon start if running containers are detected.\n","type":"boolean","default":false,"example":false},"LoggingDriver":{"description":"The logging driver to use as a default for new containers.\n","type":"string"},"MemTotal":{"description":"Total amount of physical memory available on the host, in kilobytes (kB).\n","type":"integer","format":"int64","example":2095882240},"MemoryLimit":{"description":"Indicates if the host has memory limit support enabled.","type":"boolean","example":true},"NCPU":{"description":"The number of logical CPUs usable by the daemon.\n\nThe number of available CPUs is checked by querying the operating\nsystem when the daemon starts. 
Changes to operating system CPU\nallocation after the daemon is started are not reflected.\n","type":"integer","example":4},"NEventsListener":{"description":"Number of event listeners subscribed.","type":"integer","example":30},"NFd":{"description":"The total number of file Descriptors in use by the daemon process.\n\nThis information is only returned if debug-mode is enabled.\n","type":"integer","example":64},"NGoroutines":{"description":"The number of goroutines that currently exist.\n\nThis information is only returned if debug-mode is enabled.\n","type":"integer","example":174},"Name":{"description":"Hostname of the host.","type":"string","example":"node5.corp.example.com"},"NoProxy":{"description":"Comma-separated list of domain extensions for which no proxy should be\nused. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html)\nenvironment variable.\n\nContainers do not automatically inherit this configuration.\n","type":"string","example":"*.local, 169.254/16"},"OSType":{"description":"Generic type of the operating system of the host, as returned by the\nGo runtime (`GOOS`).\n\nCurrently returned values are \"linux\" and \"windows\". 
A full list of\npossible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment).\n","type":"string","example":"linux"},"OomKillDisable":{"description":"Indicates if OOM killer disable is supported on the host.","type":"boolean"},"OperatingSystem":{"description":"Name of the host's operating system, for example: \"Ubuntu 16.04.2 LTS\"\nor \"Windows Server 2016 Datacenter\"\n","type":"string","example":"Alpine Linux v3.5"},"Plugins":{"$ref":"#/definitions/PluginsInfo"},"ProductLicense":{"description":"Reports a summary of the product license on the daemon.\n\nIf a commercial license has been applied to the daemon, information\nsuch as number of nodes, and expiration are included.\n","type":"string","example":"Community Engine"},"RegistryConfig":{"$ref":"#/definitions/RegistryServiceConfig"},"RuncCommit":{"$ref":"#/definitions/Commit"},"Runtimes":{"description":"List of [OCI compliant](https://github.com/opencontainers/runtime-spec)\nruntimes configured on the daemon. Keys hold the \"name\" used to\nreference the runtime.\n\nThe Docker daemon relies on an OCI compliant runtime (invoked via the\n`containerd` daemon) as its interface to the Linux kernel namespaces,\ncgroups, and SELinux.\n\nThe default runtime is `runc`, and automatically configured. 
Additional\nruntimes can be configured by the user and will be listed here.\n","type":"object","default":{"runc":{"path":"docker-runc"}},"additionalProperties":{"$ref":"#/definitions/Runtime"},"example":{"custom":{"path":"/usr/local/bin/my-oci-runtime","runtimeArgs":["--debug","--systemd-cgroup=false"]},"runc":{"path":"docker-runc"},"runc-master":{"path":"/go/bin/runc"}}},"SecurityOptions":{"description":"List of security features that are enabled on the daemon, such as\napparmor, seccomp, SELinux, and user-namespaces (userns).\n\nAdditional configuration options for each security feature may\nbe present, and are included as a comma-separated list of key/value\npairs.\n","type":"array","items":{"type":"string"},"example":["name=apparmor","name=seccomp,profile=default","name=selinux","name=userns"]},"ServerVersion":{"description":"Version string of the daemon.\n\n> **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/)\n> returns the Swarm version instead of the daemon version, for example\n> `swarm/1.2.8`.\n","type":"string","example":"17.06.0-ce"},"SwapLimit":{"description":"Indicates if the host has memory swap limit support enabled.","type":"boolean","example":true},"Swarm":{"$ref":"#/definitions/SwarmInfo"},"SystemStatus":{"description":"Status information about this node (standalone Swarm API).\n\n


    \n\n> **Note**: The information returned in this field is only propagated\n> by the Swarm standalone API, and is empty (`null`) when using\n> built-in swarm mode.\n","type":"array","items":{"type":"array","items":{"type":"string"}},"example":[["Role","primary"],["State","Healthy"],["Strategy","spread"],["Filters","health, port, containerslots, dependency, affinity, constraint, whitelist"],["Nodes","2"],[" swarm-agent-00","192.168.99.102:2376"],[" └ ID","5CT6:FBGO:RVGO:CZL4:PB2K:WCYN:2JSV:KSHH:GGFW:QOPG:6J5Q:IOZ2|192.168.99.102:2376"],[" └ Status","Healthy"],[" └ Containers","1 (1 Running, 0 Paused, 0 Stopped)"],[" └ Reserved CPUs","0 / 1"],[" └ Reserved Memory","0 B / 1.021 GiB"],[" └ Labels","kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"],[" └ UpdatedAt","2017-08-09T10:03:46Z"],[" └ ServerVersion","17.06.0-ce"],[" swarm-manager","192.168.99.101:2376"],[" └ ID","TAMD:7LL3:SEF7:LW2W:4Q2X:WVFH:RTXX:JSYS:XY2P:JEHL:ZMJK:JGIW|192.168.99.101:2376"],[" └ Status","Healthy"],[" └ Containers","2 (2 Running, 0 Paused, 0 Stopped)"],[" └ Reserved CPUs","0 / 1"],[" └ Reserved Memory","0 B / 1.021 GiB"],[" └ Labels","kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"],[" └ UpdatedAt","2017-08-09T10:04:11Z"],[" └ ServerVersion","17.06.0-ce"]]},"SystemTime":{"description":"Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)\nformat with nano-seconds.\n","type":"string","example":"2017-08-08T20:28:29.06202363Z"},"Warnings":{"description":"List of warnings / informational messages about missing features, or\nissues related to the daemon configuration.\n\nThese messages can be printed by the client as information to the user.\n","type":"array","items":{"type":"string"},"example":["WARNING: 
No memory limit support","WARNING: bridge-nf-call-iptables is disabled","WARNING: bridge-nf-call-ip6tables is disabled"]}}},"TLSInfo":{"description":"Information about the issuer of leaf TLS certificates and the trusted root CA certificate","type":"object","properties":{"CertIssuerPublicKey":{"description":"The base64-url-safe-encoded raw public key bytes of the issuer","type":"string"},"CertIssuerSubject":{"description":"The base64-url-safe-encoded raw subject bytes of the issuer","type":"string"},"TrustRoot":{"description":"The root CA certificate(s) that are used to validate leaf TLS certificates","type":"string"}},"example":{"CertIssuerPublicKey":"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==","CertIssuerSubject":"MBMxETAPBgNVBAMTCHN3YXJtLWNh","TrustRoot":"-----BEGIN CERTIFICATE-----\nMIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw\nEzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0\nMzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH\nA0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf\n3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO\nPQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz\npxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H\n-----END CERTIFICATE-----\n"}},"Task":{"type":"object","properties":{"AssignedGenericResources":{"$ref":"#/definitions/GenericResources"},"CreatedAt":{"type":"string","format":"dateTime"},"DesiredState":{"$ref":"#/definitions/TaskState"},"ID":{"description":"The ID of the task.","type":"string"},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"}},"Name":{"description":"Name of the task.","type":"string"},"NodeID":{"description":"The ID of the node that this task is on.","type":"string"},"ServiceID":{"description":"The ID of the service this task 
is part of.","type":"string"},"Slot":{"type":"integer"},"Spec":{"$ref":"#/definitions/TaskSpec"},"Status":{"type":"object","properties":{"ContainerStatus":{"type":"object","properties":{"ContainerID":{"type":"string"},"ExitCode":{"type":"integer"},"PID":{"type":"integer"}}},"Err":{"type":"string"},"Message":{"type":"string"},"State":{"$ref":"#/definitions/TaskState"},"Timestamp":{"type":"string","format":"dateTime"}}},"UpdatedAt":{"type":"string","format":"dateTime"},"Version":{"$ref":"#/definitions/ObjectVersion"}},"example":{"AssignedGenericResources":[{"DiscreteResourceSpec":{"Kind":"SSD","Value":3}},{"NamedResourceSpec":{"Kind":"GPU","Value":"UUID1"}},{"NamedResourceSpec":{"Kind":"GPU","Value":"UUID2"}}],"CreatedAt":"2016-06-07T21:07:31.171892745Z","DesiredState":"running","ID":"0kzzo1i0y4jz6027t0k7aezc7","NetworksAttachments":[{"Addresses":["10.255.0.10/16"],"Network":{"CreatedAt":"2016-06-07T20:31:11.912919752Z","DriverState":{"Name":"overlay","Options":{"com.docker.network.driver.overlay.vxlanid_list":"256"}},"ID":"4qvuz4ko70xaltuqbt8956gd1","IPAMOptions":{"Configs":[{"Gateway":"10.255.0.1","Subnet":"10.255.0.0/16"}],"Driver":{"Name":"default"}},"Spec":{"DriverConfiguration":{},"IPAMOptions":{"Configs":[{"Gateway":"10.255.0.1","Subnet":"10.255.0.0/16"}],"Driver":{}},"Labels":{"com.docker.swarm.internal":"true"},"Name":"ingress"},"UpdatedAt":"2016-06-07T21:07:29.955277358Z","Version":{"Index":18}}}],"NodeID":"60gvrl6tm78dmak4yl7srz94v","ServiceID":"9mnpnzenvg8p8tdbtq4wvbkcz","Slot":1,"Spec":{"ContainerSpec":{"Image":"redis"},"Placement":{},"Resources":{"Limits":{},"Reservations":{}},"RestartPolicy":{"Condition":"any","MaxAttempts":0}},"Status":{"ContainerStatus":{"ContainerID":"e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035","PID":677},"Message":"started","State":"running","Timestamp":"2016-06-07T21:07:31.290032978Z"},"UpdatedAt":"2016-06-07T21:07:31.376370513Z","Version":{"Index":71}}},"TaskSpec":{"description":"User modifiable task 
configuration.","type":"object","properties":{"ContainerSpec":{"description":"Container spec for the service.\n\n


    \n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n","type":"object","properties":{"Args":{"description":"Arguments to the command.","type":"array","items":{"type":"string"}},"Command":{"description":"The command to be run in the image.","type":"array","items":{"type":"string"}},"Configs":{"description":"Configs contains references to zero or more configs that will be exposed to the service.","type":"array","items":{"type":"object","properties":{"ConfigID":{"description":"ConfigID represents the ID of the specific config that we're referencing.","type":"string"},"ConfigName":{"description":"ConfigName is the name of the config that this references, but this is just provided for\nlookup/display purposes. The config in the reference will be identified by its ID.\n","type":"string"},"File":{"description":"File represents a specific target that is backed by a file.","type":"object","properties":{"GID":{"description":"GID represents the file GID.","type":"string"},"Mode":{"description":"Mode represents the FileMode of the file.","type":"integer","format":"uint32"},"Name":{"description":"Name represents the final filename in the filesystem.","type":"string"},"UID":{"description":"UID represents the file UID.","type":"string"}}}}}},"DNSConfig":{"description":"Specification for DNS related configurations in resolver configuration file (`resolv.conf`).","type":"object","properties":{"Nameservers":{"description":"The IP addresses of the name servers.","type":"array","items":{"type":"string"}},"Options":{"description":"A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.).","type":"array","items":{"type":"string"}},"Search":{"description":"A search list for host-name lookup.","type":"array","items":{"type":"string"}}}},"Dir":{"description":"The 
working directory for commands to run in.","type":"string"},"Env":{"description":"A list of environment variables in the form `VAR=value`.","type":"array","items":{"type":"string"}},"Groups":{"description":"A list of additional groups that the container process will run as.","type":"array","items":{"type":"string"}},"HealthCheck":{"$ref":"#/definitions/HealthConfig"},"Hostname":{"description":"The hostname to use for the container, as a valid RFC 1123 hostname.","type":"string"},"Hosts":{"description":"A list of hostname/IP mappings to add to the container's `hosts`\nfile. The format of extra hosts is specified in the\n[hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html)\nman page:\n\n IP_address canonical_hostname [aliases...]\n","type":"array","items":{"type":"string"}},"Image":{"description":"The image name to use for the container","type":"string"},"Init":{"description":"Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used.","type":"boolean","x-nullable":true},"Isolation":{"description":"Isolation technology of the containers running the service. (Windows only)","type":"string","enum":["default","process","hyperv"]},"Labels":{"description":"User-defined key/value data.","type":"object","additionalProperties":{"type":"string"}},"Mounts":{"description":"Specification for mounts to be added to containers created as part of the service.","type":"array","items":{"$ref":"#/definitions/Mount"}},"OpenStdin":{"description":"Open `stdin`","type":"boolean"},"Privileges":{"description":"Security options for the container","type":"object","properties":{"CredentialSpec":{"description":"CredentialSpec for managed service account (Windows only)","type":"object","properties":{"File":{"description":"Load credential spec from this file. 
The file is read by the daemon, and must be present in the\n`CredentialSpecs` subdirectory in the docker data directory, which defaults to\n`C:\\ProgramData\\Docker\\` on Windows.\n\nFor example, specifying `spec.json` loads `C:\\ProgramData\\Docker\\CredentialSpecs\\spec.json`.\n\n


    \n\n> **Note**: `CredentialSpec.File` and `CredentialSpec.Registry` are mutually exclusive.\n","type":"string"},"Registry":{"description":"Load credential spec from this value in the Windows registry. The specified registry value must be\nlocated in:\n\n`HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Virtualization\\Containers\\CredentialSpecs`\n\n


    \n\n\n> **Note**: `CredentialSpec.File` and `CredentialSpec.Registry` are mutually exclusive.\n","type":"string"}}},"SELinuxContext":{"description":"SELinux labels of the container","type":"object","properties":{"Disable":{"description":"Disable SELinux","type":"boolean"},"Level":{"description":"SELinux level label","type":"string"},"Role":{"description":"SELinux role label","type":"string"},"Type":{"description":"SELinux type label","type":"string"},"User":{"description":"SELinux user label","type":"string"}}}}},"ReadOnly":{"description":"Mount the container's root filesystem as read only.","type":"boolean"},"Secrets":{"description":"Secrets contains references to zero or more secrets that will be exposed to the service.","type":"array","items":{"type":"object","properties":{"File":{"description":"File represents a specific target that is backed by a file.","type":"object","properties":{"GID":{"description":"GID represents the file GID.","type":"string"},"Mode":{"description":"Mode represents the FileMode of the file.","type":"integer","format":"uint32"},"Name":{"description":"Name represents the final filename in the filesystem.","type":"string"},"UID":{"description":"UID represents the file UID.","type":"string"}}},"SecretID":{"description":"SecretID represents the ID of the specific secret that we're referencing.","type":"string"},"SecretName":{"description":"SecretName is the name of the secret that this references, but this is just provided for\nlookup/display purposes. 
The secret in the reference will be identified by its ID.\n","type":"string"}}}},"StopGracePeriod":{"description":"Amount of time to wait for the container to terminate before forcefully killing it.","type":"integer","format":"int64"},"StopSignal":{"description":"Signal to stop the container.","type":"string"},"TTY":{"description":"Whether a pseudo-TTY should be allocated.","type":"boolean"},"User":{"description":"The user inside the container.","type":"string"}}},"ForceUpdate":{"description":"A counter that triggers an update even if no relevant parameters have been changed.","type":"integer"},"LogDriver":{"description":"Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified.","type":"object","properties":{"Name":{"type":"string"},"Options":{"type":"object","additionalProperties":{"type":"string"}}}},"NetworkAttachmentSpec":{"description":"Read-only spec type for non-swarm containers attached to swarm overlay\nnetworks.\n\n


    \n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n","type":"object","properties":{"ContainerID":{"description":"ID of the container represented by this task","type":"string"}}},"Networks":{"type":"array","items":{"type":"object","properties":{"Aliases":{"type":"array","items":{"type":"string"}},"Target":{"type":"string"}}}},"Placement":{"type":"object","properties":{"Constraints":{"description":"An array of constraints.","type":"array","items":{"type":"string"},"example":["node.hostname!=node3.corp.example.com","node.role!=manager","node.labels.type==production"]},"Platforms":{"description":"Platforms stores all the platforms that the service's image can\nrun on. This field is used in the platform filter for scheduling.\nIf empty, then the platform filter is off, meaning there are no\nscheduling restrictions.\n","type":"array","items":{"$ref":"#/definitions/Platform"}},"Preferences":{"description":"Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence.","type":"array","items":{"type":"object","properties":{"Spread":{"type":"object","properties":{"SpreadDescriptor":{"description":"label descriptor, such as engine.labels.az","type":"string"}}}}},"example":[{"Spread":{"SpreadDescriptor":"node.labels.datacenter"}},{"Spread":{"SpreadDescriptor":"node.labels.rack"}}]}}},"PluginSpec":{"description":"Plugin spec for the service. *(Experimental release only.)*\n\n


    \n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n","type":"object","properties":{"Disabled":{"description":"Disable the plugin once scheduled.","type":"boolean"},"Name":{"description":"The name or 'alias' to use for the plugin.","type":"string"},"PluginPrivilege":{"type":"array","items":{"description":"Describes a permission accepted by the user upon installing the plugin.","type":"object","properties":{"Description":{"type":"string"},"Name":{"type":"string"},"Value":{"type":"array","items":{"type":"string"}}}}},"Remote":{"description":"The plugin image reference to use.","type":"string"}}},"Resources":{"description":"Resource requirements which apply to each individual container created as part of the service.","type":"object","properties":{"Limits":{"description":"Define resources limits.","$ref":"#/definitions/ResourceObject"},"Reservation":{"description":"Define resources reservation.","$ref":"#/definitions/ResourceObject"}}},"RestartPolicy":{"description":"Specification for the restart policy which applies to containers created as part of this service.","type":"object","properties":{"Condition":{"description":"Condition for restart.","type":"string","enum":["none","on-failure","any"]},"Delay":{"description":"Delay between restart attempts.","type":"integer","format":"int64"},"MaxAttempts":{"description":"Maximum attempts to restart a given container before giving up (default value is 0, which is ignored).","type":"integer","format":"int64","default":0},"Window":{"description":"Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded).","type":"integer","format":"int64","default":0}}},"Runtime":{"description":"Runtime is the type of runtime specified for the task 
executor.","type":"string"}}},"TaskState":{"type":"string","enum":["new","allocated","pending","assigned","accepted","preparing","ready","starting","running","complete","shutdown","failed","rejected","remove","orphaned"]},"ThrottleDevice":{"type":"object","properties":{"Path":{"description":"Device path","type":"string"},"Rate":{"description":"Rate","type":"integer","format":"int64","minimum":0}}},"Volume":{"type":"object","required":["Name","Driver","Mountpoint","Labels","Scope","Options"],"properties":{"CreatedAt":{"description":"Date/Time the volume was created.","type":"string","format":"dateTime"},"Driver":{"description":"Name of the volume driver used by the volume.","type":"string","x-nullable":false},"Labels":{"description":"User-defined key/value metadata.","type":"object","additionalProperties":{"type":"string"},"x-nullable":false},"Mountpoint":{"description":"Mount path of the volume on the host.","type":"string","x-nullable":false},"Name":{"description":"Name of the volume.","type":"string","x-nullable":false},"Options":{"description":"The driver specific options used when creating the volume.","type":"object","additionalProperties":{"type":"string"}},"Scope":{"description":"The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level.","type":"string","default":"local","enum":["local","global"],"x-nullable":false},"Status":{"description":"Low-level details about the volume, provided by the volume driver.\nDetails are returned as a map with key/value pairs:\n`{\"key\":\"value\",\"key2\":\"value2\"}`.\n\nThe `Status` field is optional, and is omitted if the volume driver\ndoes not support this feature.\n","type":"object","additionalProperties":{"type":"object"}},"UsageData":{"description":"Usage details about the volume. 
This information is used by the\n`GET /system/df` endpoint, and omitted in other endpoints.\n","type":"object","required":["Size","RefCount"],"properties":{"RefCount":{"description":"The number of containers referencing this volume. This field\nis set to `-1` if the reference-count is not available.\n","type":"integer","default":-1,"x-nullable":false},"Size":{"description":"Amount of disk space used by the volume (in bytes). This information\nis only available for volumes created with the `\"local\"` volume\ndriver. For volumes created with other volume drivers, this field\nis set to `-1` (\"not available\")\n","type":"integer","default":-1,"x-nullable":false}},"x-nullable":true}},"example":{"CreatedAt":"2016-06-07T20:31:11.853781916Z","Driver":"custom","Labels":{"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"},"Mountpoint":"/var/lib/docker/volumes/tardis","Name":"tardis","Scope":"local","Status":{"hello":"world"}}},"api.putConfigOrLicenseResponse":{"id":"api.putConfigOrLicenseResponse","required":["message"],"properties":{"message":{"type":"string"}}},"auth.Credentials":{"id":"auth.Credentials","properties":{"password":{"type":"string"},"token":{"type":"string"},"username":{"type":"string"}}},"auth.LoginResponse":{"id":"auth.LoginResponse","properties":{"auth_token":{"type":"string"}}},"authz.Collection":{"id":"authz.Collection","required":["name","path","id","parent_ids","label_constraints","legacylabelkey","legacylabelvalue","created_at","updated_at"],"properties":{"created_at":{"description":"When the collection was created","type":"string","format":"date-time"},"id":{"description":"A unique ID for this collection","type":"string"},"label_constraints":{"description":"A set of label constraints to be applied to any service or container created in this collection","type":"array","items":{"$ref":"#/definitions/authz.LabelConstraint"}},"legacylabelkey":{"description":"The key of the legacy authorization label for this 
collection","type":"string"},"legacylabelvalue":{"description":"The value of the legacy authorization label for this collection","type":"string"},"name":{"description":"The name of the collection","type":"string"},"parent_ids":{"description":"A list of collection IDs of parent collections","type":"array","items":{"type":"string"}},"path":{"description":"The full path of the collection","type":"string"},"updated_at":{"description":"When the collection was updated","type":"string","format":"date-time"}}},"authz.CollectionCreate":{"id":"authz.CollectionCreate","required":["name","parent_id","label_constraints","legacy_label_key","legacy_label_value"],"properties":{"label_constraints":{"type":"array","items":{"$ref":"#/definitions/authz.LabelConstraint"}},"legacy_label_key":{"type":"string"},"legacy_label_value":{"type":"string"},"name":{"type":"string"},"parent_id":{"type":"string"}}},"authz.CollectionCreateResponse":{"id":"authz.CollectionCreateResponse","required":["id"],"properties":{"id":{"type":"string"}}},"authz.CollectionID":{"id":"authz.CollectionID","required":["id"],"properties":{"id":{"type":"string"}}},"authz.CollectionUpdate":{"id":"authz.CollectionUpdate","required":["label_constraints"],"properties":{"label_constraints":{"type":"array","items":{"$ref":"#/definitions/authz.LabelConstraint"}}}},"authz.LabelConstraint":{"id":"authz.LabelConstraint","required":["type","label_key","label_value","equality"],"properties":{"equality":{"type":"boolean"},"label_key":{"type":"string"},"label_value":{"type":"string"},"type":{"type":"string"}}},"authz.RoleCreateResponse":{"id":"authz.RoleCreateResponse","required":["id"],"properties":{"id":{"description":"The ID of the newly created 
role","type":"string"}}},"config.AuditLogConfiguration":{"id":"config.AuditLogConfiguration","required":["level","support_dump_include_audit_logs"],"properties":{"level":{"type":"string"},"support_dump_include_audit_logs":{"type":"boolean"}}},"config.AuthConfiguration":{"id":"config.AuthConfiguration","required":["sessions","saml","backend"],"properties":{"backend":{"description":"The name of the auth backend to use","type":"string","enum":["managed","ldap"]},"defaultNewUserRole":{"type":"string"},"managedPasswordDisabled":{"description":"Whether the managed authentication is disabled in the system","type":"boolean"},"managedPasswordFallbackUser":{"description":"Fallback user that may log in when basic authentication is disabled","type":"string"},"saml":{"$ref":"#/definitions/forms.SAMLSettings"},"samlEnabled":{"description":"Whether SAML SSO is enabled in the system","type":"boolean"},"samlLoginText":{"description":"Customized SAML Login Text","type":"string"},"sessions":{"$ref":"#/definitions/forms.SessionsConfig"}}},"config.HTTPHeader":{"id":"config.HTTPHeader","required":["name","value"],"properties":{"name":{"type":"string"},"value":{"type":"string"}}},"config.LicenseConfiguration":{"id":"config.LicenseConfiguration","required":["auto_refresh","license_server_url","license_server_public_key"],"properties":{"auto_refresh":{"type":"boolean"},"license_server_public_key":{"type":"string"},"license_server_url":{"type":"string"}}},"config.LogConfiguration":{"id":"config.LogConfiguration","required":["level"],"properties":{"level":{"type":"string"}}},"config.SchedulingConfiguration":{"id":"config.SchedulingConfiguration","required":["enable_admin_ucp_scheduling","default_node_orchestrator"],"properties":{"default_node_orchestrator":{"type":"string"},"enable_admin_ucp_scheduling":{"type":"boolean"}}},"config.TrackingConfiguration":{"id":"config.TrackingConfiguration","required":["disable_usageinfo","disable_tracking","anonymize_tracking","ClusterLabel"],"properties":{"
ClusterLabel":{"type":"string"},"anonymize_tracking":{"type":"boolean"},"disable_tracking":{"type":"boolean"},"disable_usageinfo":{"type":"boolean"}}},"config.TrustConfiguration":{"id":"config.TrustConfiguration","required":["require_content_trust","require_signature_from"],"properties":{"require_content_trust":{"type":"boolean"},"require_signature_from":{"type":"array","items":{"type":"string"}}}},"config.TrustedRegistryConfig":{"id":"config.TrustedRegistryConfig","required":["hostAddress","serviceID","caBundle","batchScanningDataEnabled"],"properties":{"batchScanningDataEnabled":{"type":"boolean"},"caBundle":{"type":"string"},"hostAddress":{"type":"string"},"serviceID":{"type":"string"}}},"config.UCPConfiguration":{"id":"config.UCPConfiguration","required":["auth","Registries","SchedulingConfiguration","TrackingConfiguration","TrustConfiguration","LogConfiguration","AuditLogConfiguration","LicenseConfiguration","customAPIServerHeaders","ClusterConfig"],"properties":{"AuditLogConfiguration":{"$ref":"#/definitions/config.AuditLogConfiguration"},"ClusterConfig":{"$ref":"#/definitions/types.ClusterConfig"},"LicenseConfiguration":{"$ref":"#/definitions/config.LicenseConfiguration"},"LogConfiguration":{"$ref":"#/definitions/config.LogConfiguration"},"Registries":{"type":"array","items":{"$ref":"#/definitions/config.TrustedRegistryConfig"}},"SchedulingConfiguration":{"$ref":"#/definitions/config.SchedulingConfiguration"},"TrackingConfiguration":{"$ref":"#/definitions/config.TrackingConfiguration"},"TrustConfiguration":{"$ref":"#/definitions/config.TrustConfiguration"},"auth":{"$ref":"#/definitions/config.AuthConfiguration"},"customAPIServerHeaders":{"type":"array","items":{"$ref":"#/definitions/config.HTTPHeader"}}}},"errors.APIError":{"id":"errors.APIError","required":["code","message"],"properties":{"code":{"type":"string"},"detail":{"$ref":"#/definitions/errors.APIError.detail"},"message":{"type":"string"}}},"errors.APIError.detail":{"id":"errors.APIError.detail"},"fo
rms.BulkOperation":{"id":"forms.BulkOperation","required":["op"],"properties":{"op":{"description":"The operation to perform","type":"string"},"ref":{"description":"An identifier referencing the object on which to perform the operation, if applicable","type":"string"},"value":{"description":"The form value to submit for the operation, if applicable","type":"string"}}},"forms.BulkOperations":{"id":"forms.BulkOperations","required":["operations"],"properties":{"operations":{"type":"array","items":{"$ref":"#/definitions/forms.BulkOperation"}}}},"forms.Certificate":{"id":"forms.Certificate","required":["label","cert"],"properties":{"cert":{"description":"Encoded PEM for the cert","type":"string"},"label":{"description":"Label for the certificate","type":"string"}}},"forms.ChangePassword":{"id":"forms.ChangePassword","required":["oldPassword","newPassword"],"properties":{"newPassword":{"description":"User's new password","type":"string"},"oldPassword":{"description":"User's current password. Required if the client is changing their own password. May be omitted if an admin is changing another user's password","type":"string"}}},"forms.CreateAccount":{"id":"forms.CreateAccount","required":["name"],"properties":{"fullName":{"description":"Full name of account","type":"string"},"isActive":{"description":"Whether the user is active and can login (users only)","type":"boolean"},"isAdmin":{"description":"Whether the user is an admin (users only)","type":"boolean"},"isOrg":{"description":"Whether the account is an organization","type":"boolean"},"name":{"description":"Name of account","type":"string"},"password":{"description":"Password for the user (users only)","type":"string"},"searchLDAP":{"description":"Whether the user should be found by searching against the currently configured LDAP servers. 
If true, the password field may be omitted and the discovered full name of the user will be used if one is not specified in this form (users only)","type":"boolean"}}},"forms.CreateAccountPublicKey":{"id":"forms.CreateAccountPublicKey","required":["publicKey"],"properties":{"certificates":{"description":"certificates for the public key","type":"array","items":{"$ref":"#/definitions/forms.Certificate"}},"label":{"description":"Label or description for the key","type":"string"},"publicKey":{"description":"Encoded PEM for the public key","type":"string"}}},"forms.CreateTeam":{"id":"forms.CreateTeam","required":["name"],"properties":{"description":{"description":"Description of the team","type":"string"},"name":{"description":"Name of the team","type":"string"}}},"forms.GroupLinkOpts":{"id":"forms.GroupLinkOpts","required":["enableLink","groupName"],"properties":{"enableLink":{"description":"Whether to enable SAML linking. If false, all other fields are ignored","type":"boolean"},"groupName":{"description":"The group name that is obtained from group attribute of the SAML assertion","type":"string"}}},"forms.LDAPDomainServerConfig":{"id":"forms.LDAPDomainServerConfig","required":["domain","serverURL","noSimplePagination","startTLS","rootCerts","tlsSkipVerify","readerDN","readerPassword"],"properties":{"domain":{"description":"The root domain component of this server, e.g., dc=example,dc=com. A longest-suffix match of the base DN for LDAP searches is used to select which LDAP server to use for search requests. 
If no matching domain is found, the default LDAP server config is used.","type":"string"},"noSimplePagination":{"description":"The server does not support the Simple Paged Results control extension (RFC 2696)","type":"boolean"},"readerDN":{"description":"The distinguished name the system will use to bind to the LDAP server when performing searches","type":"string"},"readerPassword":{"description":"The password that the system will use to bind to the LDAP server when performing searches","type":"string"},"rootCerts":{"description":"A root certificate PEM bundle to use when establishing a TLS connection to the server","type":"string"},"serverURL":{"description":"The URL of the LDAP server","type":"string"},"startTLS":{"description":"Whether to use StartTLS to secure the connection to the server, ignored if server URL scheme is 'ldaps://'","type":"boolean"},"tlsSkipVerify":{"description":"Whether to skip verifying of the server's certificate when establishing a TLS connection, not recommended unless testing on a secure network","type":"boolean"}}},"forms.LDAPSettings":{"id":"forms.LDAPSettings","required":["syncSchedule","jitUserProvisioning","startTLS","rootCerts","tlsSkipVerify","readerDN","readerPassword","serverURL","noSimplePagination","additionalDomains","userSearchConfigs","adminSyncOpts"],"properties":{"additionalDomains":{"description":"A list of additional LDAP domains (and corresponding server configs) from which to sync users and team members","type":"array","items":{"$ref":"#/definitions/forms.LDAPDomainServerConfig"}},"adminSyncOpts":{"description":"Settings for syncing system admin users","$ref":"#/definitions/forms.MemberSyncOpts"},"jitUserProvisioning":{"description":"Whether to only create user accounts upon first login (recommended)","type":"boolean"},"noSimplePagination":{"description":"The server does not support the Simple Paged Results control extension (RFC 2696)","type":"boolean"},"readerDN":{"description":"The distinguished name the system 
will use to bind to the LDAP server when performing searches","type":"string"},"readerPassword":{"description":"The password that the system will use to bind to the LDAP server when performing searches","type":"string"},"rootCerts":{"description":"A root certificate PEM bundle to use when establishing a TLS connection to the server","type":"string"},"serverURL":{"description":"The URL of the LDAP server","type":"string"},"startTLS":{"description":"Whether to use StartTLS to secure the connection to the server, ignored if server URL scheme is 'ldaps://'","type":"boolean"},"syncSchedule":{"description":"The scheduled time for automatic LDAP sync jobs. Can be specified in CRON table entry format (with a seconds field always set to 0): '0 (minutes) (hours) (day of month) (month of year) (day of week, optional)'. Can also be specified as a predefined scheduling definition: '@hourly', '@daily', or '@weekly'. Default is '@hourly' if empty or omitted","type":"string"},"tlsSkipVerify":{"description":"Whether to skip verifying of the server's certificate when establishing a TLS connection, not recommended unless testing on a secure network","type":"boolean"},"userSearchConfigs":{"description":"One or more settings for syncing users","type":"array","items":{"$ref":"#/definitions/forms.UserSearchOpts"}}}},"forms.MemberSyncOpts":{"id":"forms.MemberSyncOpts","required":["enableSync","selectGroupMembers","groupDN","groupMemberAttr","searchBaseDN","searchScopeSubtree","searchFilter"],"properties":{"enableSync":{"description":"Whether to enable LDAP syncing. If false, all other fields are ignored","type":"boolean"},"groupDN":{"description":"The distinguished name of the LDAP group. Required if selectGroupMembers is true, ignored otherwise","type":"string"},"groupMemberAttr":{"description":"The name of the LDAP group entry attribute which corresponds to distinguished names of members. 
Required if selectGroupMembers is true, ignored otherwise","type":"string"},"searchBaseDN":{"description":"The distinguished name of the element from which the LDAP server will search for users. Required if selectGroupMembers is false, ignored otherwise","type":"string"},"searchFilter":{"description":"The LDAP search filter used to select users if selectGroupMembers is false, may be left blank","type":"string"},"searchScopeSubtree":{"description":"Whether to search for users in the entire subtree of the base DN or to only search one level under the base DN (if false). Required if selectGroupMembers is false, ignored otherwise","type":"boolean"},"selectGroupMembers":{"description":"Whether to sync using a group DN and member attribute selection or to use a search filter (if false)","type":"boolean"}}},"forms.SAMLSettings":{"id":"forms.SAMLSettings","required":["idpMetadataURL","spHost","rootCerts","tlsSkipVerify"],"properties":{"idpMetadataURL":{"description":"The Identity Provider's Metadata URL","type":"string"},"rootCerts":{"description":"Root Certs to access IdP Metadata","type":"string"},"spHost":{"description":"The Host address of the Service Provider","type":"string"},"tlsSkipVerify":{"description":"Option for TLSSkipVerify","type":"boolean"}}},"forms.SessionsConfig":{"id":"forms.SessionsConfig","required":["lifetimeMinutes","renewalThresholdMinutes","perUserLimit"],"properties":{"lifetimeMinutes":{"description":"Specifies the initial lifetime (in minutes) of a session from the moment it is generated, minimum is 10 minutes","type":"integer","format":"integer"},"perUserLimit":{"description":"Indicates the maximum number of sessions that any user can have active at any given time. If creating a new session would put a user over this limit then the least recently used session will be deleted. 
A value of zero disables limiting the number of sessions that users may have","type":"integer","format":"integer"},"renewalThresholdMinutes":{"description":"Indicates a period of time (in minutes) before the expiration of a session where, if used, a session will be extended by the current configured lifetime from then, a zero value disables session extension, maximum is 5 minutes less than initial session lifetime","type":"integer","format":"integer"}}},"forms.SetMembership":{"id":"forms.SetMembership","properties":{"isAdmin":{"description":"Whether the member should be an admin of the organization or team (default false), unchanged if nil or omitted","type":"boolean"}}},"forms.UpdateAccount":{"id":"forms.UpdateAccount","properties":{"fullName":{"description":"Full name of account, unchanged if null or omitted","type":"string"},"isActive":{"description":"Whether the user is active and can login (users only), unchanged if null or omitted","type":"boolean"},"isAdmin":{"description":"Whether the user is an admin (users only), unchanged if null or omitted","type":"boolean"}}},"forms.UpdateAccountPublicKey":{"id":"forms.UpdateAccountPublicKey","properties":{"certificates":{"description":"certificates for the public key","type":"array","items":{"$ref":"#/definitions/forms.Certificate"}},"label":{"description":"Label or description for the key","type":"string"}}},"forms.UpdateTeam":{"id":"forms.UpdateTeam","properties":{"description":{"description":"Description of the team, unchanged if nil or omitted","type":"string"},"name":{"description":"Name of the team, unchanged if nil or omitted","type":"string"}}},"forms.UserSearchOpts":{"id":"forms.UserSearchOpts","required":["baseDN","scopeSubtree","usernameAttr","fullNameAttr","filter","matchGroup","matchGroupDN","matchGroupMemberAttr","matchGroupIterate"],"properties":{"baseDN":{"description":"The distinguished name of the element from which the LDAP server will search for users","type":"string"},"filter":{"description":"The 
LDAP search filter used to select user elements, may be left blank","type":"string"},"fullNameAttr":{"description":"The name of the attribute of the LDAP user element which should be selected as the full name of the user","type":"string"},"matchGroup":{"description":"Whether to additionally filter users to those who are direct members of a group","type":"boolean"},"matchGroupDN":{"description":"The distinguished name of the LDAP group. Required if matchGroup is true, ignored otherwise","type":"string"},"matchGroupIterate":{"description":"Whether to iterate through the group members and perform a lookup for each one separately to get all of the user attributes, instead of searching users first then apply the group selection filter. Ignored if matchGroup is false","type":"boolean"},"matchGroupMemberAttr":{"description":"The name of the LDAP group entry attribute which corresponds to distinguished names of members. Required if matchGroup is true, ignored otherwise","type":"string"},"scopeSubtree":{"description":"Whether to search for users in the entire subtree of the base DN or to only search one level under the base DN (if false)","type":"boolean"},"usernameAttr":{"description":"The name of the attribute of the LDAP user element which should be selected as the username","type":"string"}}},"responses.Account":{"id":"responses.Account","required":["name","id","fullName","isOrg"],"properties":{"fullName":{"description":"Full Name of the account","type":"string"},"id":{"description":"ID of the account","type":"string"},"isActive":{"description":"Whether the user is active and can login (users only)","type":"boolean"},"isAdmin":{"description":"Whether the user is a system admin (users only)","type":"boolean"},"isImported":{"description":"Whether the user was imported from an upstream identity provider","type":"boolean"},"isOrg":{"description":"Whether the account is an organization (or user)","type":"boolean"},"membersCount":{"description":"The number of members of the 
organization","type":"integer","format":"int32"},"name":{"description":"Name of the account","type":"string"},"teamsCount":{"description":"The number of teams in the organization","type":"integer","format":"int32"}}},"responses.AccountPublicKey":{"id":"responses.AccountPublicKey","required":["id","accountID","publicKey","label"],"properties":{"accountID":{"description":"the ID of the account","type":"string"},"certificates":{"description":"certificates for the public key","type":"array","items":{"$ref":"#/definitions/responses.Certificate"}},"id":{"description":"the hash of the public key's DER bytes","type":"string"},"label":{"description":"the label or description for the key","type":"string"},"publicKey":{"description":"the encoded PEM of the public key","type":"string"}}},"responses.AccountPublicKeys":{"id":"responses.AccountPublicKeys","required":["accountPublicKeys","nextPageStart"],"properties":{"accountPublicKeys":{"type":"array","items":{"$ref":"#/definitions/responses.AccountPublicKey"}},"nextPageStart":{"description":"The page start value which can be used to request the next batch of items (empty if there are no more items remaining)","type":"string"}}},"responses.Accounts":{"id":"responses.Accounts","required":["accounts","usersCount","orgsCount","nextPageStart","resourceCount"],"properties":{"accounts":{"type":"array","items":{"$ref":"#/definitions/responses.Account"}},"nextPageStart":{"description":"The page start value which can be used to request the next batch of items (empty if there are no more items remaining)","type":"string"},"orgsCount":{"description":"The total (unpaged) number of organizations (not considering any filters applied to this request)","type":"integer","format":"int32"},"resourceCount":{"description":"The total (unpaged) number of items (not considering any filters applied to the request)","type":"integer","format":"int32"},"usersCount":{"description":"The total (unpaged) number of users (not considering any filters applied to 
this request)","type":"integer","format":"int32"}}},"responses.BulkResult":{"id":"responses.BulkResult","required":["op","success"],"properties":{"error":{"description":"If not successful, the error encountered when performing the operation on this resource","$ref":"#/definitions/errors.APIError"},"op":{"description":"The operation which was performed","type":"string"},"ref":{"description":"The corresponding identifier in the bulk operation request","type":"string"},"success":{"description":"Whether the bulk operation was successful for this resource","type":"boolean"}}},"responses.BulkResults":{"id":"responses.BulkResults","required":["results"],"properties":{"results":{"description":"List of results for the bulk operation. The index of a bulk result corresponds to the index of the resource in the bulk request if result identifiers are omitted","type":"array","items":{"$ref":"#/definitions/responses.BulkResult"}}}},"responses.Certificate":{"id":"responses.Certificate","required":["label","cert"],"properties":{"cert":{"description":"Encoded PEM for the cert","type":"string"},"label":{"description":"Label for the certificate","type":"string"}}},"responses.Grant":{"id":"responses.Grant","required":["subjectID","objectID","roleID"],"properties":{"objectID":{"description":"ID of the object managed by the service","type":"string"},"roleID":{"description":"ID of the role managed by the service","type":"string"},"subjectID":{"description":"ID of the subject of this grant","type":"string"}}},"responses.GrantSubject":{"id":"responses.GrantSubject","required":["id","subject_type"],"properties":{"account":{"description":"The account associated with this subject, if any","$ref":"#/definitions/responses.Account"},"id":{"description":"ID of this subject","type":"string"},"subject_type":{"description":"The type of this subject (anonymous, authenticated, user, team, org)","type":"string"},"team":{"description":"The team associated with this subject, if 
any","$ref":"#/definitions/responses.Team"}}},"responses.Grants":{"id":"responses.Grants","required":["grants","subjects","nextPageStart","resourceCount"],"properties":{"grants":{"type":"array","items":{"$ref":"#/definitions/responses.Grant"}},"nextPageStart":{"description":"The page start value which can be used to request the next batch of items (empty if there are no more items remaining)","type":"string"},"resourceCount":{"description":"The total (unpaged) number of items (not considering any filters applied to the request)","type":"integer","format":"int32"},"subjects":{"type":"array","items":{"$ref":"#/definitions/responses.GrantSubject"}}}},"responses.GroupLinkOpts":{"id":"responses.GroupLinkOpts","required":["enableLink","groupName"],"properties":{"enableLink":{"description":"Whether to enable SAML linking. If false, all other fields are ignored","type":"boolean"},"groupName":{"description":"The group name that is obtained from group attribute of the SAML assertion","type":"string"}}},"responses.LDAPDomainServerConfig":{"id":"responses.LDAPDomainServerConfig","required":["domain","rootCerts","tlsSkipVerify","readerDN","serverURL","noSimplePagination","startTLS"],"properties":{"domain":{"description":"The root domain component of this server, e.g., dc=example,dc=com. A longest-suffix match of the base DN for LDAP searches is used to select which LDAP server to use for search requests. 
If no matching domain is found, the default LDAP server config is used.","type":"string"},"noSimplePagination":{"description":"The server does not support the Simple Paged Results control extension (RFC 2696)","type":"boolean"},"readerDN":{"description":"The distinguished name the system will use to bind to the LDAP server when performing searches","type":"string"},"rootCerts":{"description":"A root certificate bundle to use when establishing a TLS connection to the server","type":"string"},"serverURL":{"description":"The URL of the LDAP server","type":"string"},"startTLS":{"description":"Whether to use StartTLS to secure the connection to the server, ignored if server URL scheme is 'ldaps://'","type":"boolean"},"tlsSkipVerify":{"description":"Whether to skip verifying of the server's certificate when establishing a TLS connection, not recommended unless testing on a secure network","type":"boolean"}}},"responses.LDAPSettings":{"id":"responses.LDAPSettings","required":["tlsSkipVerify","readerDN","serverURL","noSimplePagination","startTLS","rootCerts","additionalDomains","userSearchConfigs","adminSyncOpts","syncSchedule","jitUserProvisioning"],"properties":{"additionalDomains":{"description":"A list of additional LDAP domains (and corresponding server configs) from which to sync users and team members","type":"array","items":{"$ref":"#/definitions/responses.LDAPDomainServerConfig"}},"adminSyncOpts":{"description":"Settings for syncing system admin users","$ref":"#/definitions/responses.MemberSyncOpts"},"jitUserProvisioning":{"description":"Whether to only create user accounts upon first login (recommended)","type":"boolean"},"noSimplePagination":{"description":"The server does not support the Simple Paged Results control extension (RFC 2696)","type":"boolean"},"readerDN":{"description":"The distinguished name the system will use to bind to the LDAP server when performing searches","type":"string"},"rootCerts":{"description":"A root certificate bundle to use when 
establishing a TLS connection to the server","type":"string"},"serverURL":{"description":"The URL of the LDAP server","type":"string"},"startTLS":{"description":"Whether to use StartTLS to secure the connection to the server, ignored if server URL scheme is 'ldaps://'","type":"boolean"},"syncSchedule":{"description":"The sync job schedule in CRON format","type":"string"},"tlsSkipVerify":{"description":"Whether to skip verifying of the server's certificate when establishing a TLS connection, not recommended unless testing on a secure network","type":"boolean"},"userSearchConfigs":{"description":"One or more settings for syncing users","type":"array","items":{"$ref":"#/definitions/responses.UserSearchOpts"}}}},"responses.Member":{"id":"responses.Member","required":["member","isAdmin"],"properties":{"isAdmin":{"description":"Whether the member is an admin of the organization or team","type":"boolean"},"member":{"description":"The user which is a member of the organization or team","$ref":"#/definitions/responses.Account"}}},"responses.MemberOrg":{"id":"responses.MemberOrg","required":["org","isAdmin"],"properties":{"isAdmin":{"description":"Whether the user is an admin of the organization","type":"boolean"},"org":{"description":"The organization which the user is a member of","$ref":"#/definitions/responses.Account"}}},"responses.MemberOrgs":{"id":"responses.MemberOrgs","required":["memberOrgs","nextPageStart"],"properties":{"memberOrgs":{"type":"array","items":{"$ref":"#/definitions/responses.MemberOrg"}},"nextPageStart":{"description":"The page start value which can be used to request the next batch of items (empty if there are no more items remaining)","type":"string"}}},"responses.MemberSyncOpts":{"id":"responses.MemberSyncOpts","required":["enableSync","selectGroupMembers","groupDN","groupMemberAttr","searchBaseDN","searchScopeSubtree","searchFilter"],"properties":{"enableSync":{"description":"Whether to enable LDAP syncing. 
If false, all other fields are ignored","type":"boolean"},"groupDN":{"description":"The distinguished name of the LDAP group. Applicable only if selectGroupMembers is true, ignored otherwise","type":"string"},"groupMemberAttr":{"description":"The name of the LDAP group entry attribute which corresponds to distinguished names of members. Applicable only if selectGroupMembers is true, ignored otherwise","type":"string"},"searchBaseDN":{"description":"The distinguished name of the element from which the LDAP server will search for users. Applicable only if selectGroupMembers is false, ignored otherwise","type":"string"},"searchFilter":{"description":"The LDAP search filter used to select users if selectGroupMembers is false, may be left blank","type":"string"},"searchScopeSubtree":{"description":"Whether to search for users in the entire subtree of the base DN or to only search one level under the base DN (if false). Applicable only if selectGroupMembers is false, ignored otherwise","type":"boolean"},"selectGroupMembers":{"description":"Whether to sync using a group DN and member attribute selection or to use a search filter (if false)","type":"boolean"}}},"responses.MemberTeam":{"id":"responses.MemberTeam","required":["team","isAdmin"],"properties":{"isAdmin":{"description":"Whether the user is an admin of the team","type":"boolean"},"team":{"description":"The team which the user is a member of","$ref":"#/definitions/responses.Team"}}},"responses.MemberTeams":{"id":"responses.MemberTeams","required":["memberTeams","nextPageStart"],"properties":{"memberTeams":{"type":"array","items":{"$ref":"#/definitions/responses.MemberTeam"}},"nextPageStart":{"description":"The page start value which can be used to request the next batch of items (empty if there are no more items 
remaining)","type":"string"}}},"responses.Members":{"id":"responses.Members","required":["members","nextPageStart","resourceCount"],"properties":{"members":{"type":"array","items":{"$ref":"#/definitions/responses.Member"}},"nextPageStart":{"description":"The page start value which can be used to request the next batch of items (empty if there are no more items remaining)","type":"string"},"resourceCount":{"description":"The total (unpaged) number of items (not considering any filters applied to the request)","type":"integer","format":"int32"}}},"responses.Team":{"id":"responses.Team","required":["orgID","name","id","description","membersCount"],"properties":{"description":{"description":"Description of the team","type":"string"},"id":{"description":"ID of the team","type":"string"},"membersCount":{"description":"The number of members of the team","type":"integer","format":"int32"},"name":{"description":"Name of the team","type":"string"},"orgID":{"description":"ID of the organization to which this team belongs","type":"string"}}},"responses.Teams":{"id":"responses.Teams","required":["teams","nextPageStart","resourceCount"],"properties":{"nextPageStart":{"description":"The page start value which can be used to request the next batch of items (empty if there are no more items remaining)","type":"string"},"resourceCount":{"description":"The total (unpaged) number of items (not considering any filters applied to the request)","type":"integer","format":"int32"},"teams":{"type":"array","items":{"$ref":"#/definitions/responses.Team"}}}},"responses.UserSearchOpts":{"id":"responses.UserSearchOpts","required":["baseDN","scopeSubtree","usernameAttr","fullNameAttr","filter","matchGroup","matchGroupDN","matchGroupMemberAttr","matchGroupIterate"],"properties":{"baseDN":{"description":"The distinguished name of the element from which the LDAP server will search for users","type":"string"},"filter":{"description":"The LDAP search filter used to select user elements, may be left 
blank","type":"string"},"fullNameAttr":{"description":"The name of the attribute of the LDAP user element which should be selected as the full name of the user","type":"string"},"matchGroup":{"description":"Whether to additionally filter users to those who are direct members of a group","type":"boolean"},"matchGroupDN":{"description":"The distinguished name of the LDAP group. Required if matchGroup is true, ignored otherwise","type":"string"},"matchGroupIterate":{"description":"Whether to iterate through the group members and perform a lookup for each one separately to get all of the user attributes, instead of searching users first then apply the group selection filter. Ignored if matchGroup is false","type":"boolean"},"matchGroupMemberAttr":{"description":"The name of the LDAP group entry attribute which corresponds to distinguished names of members. Required if matchGroup is true, ignored otherwise","type":"string"},"scopeSubtree":{"description":"Whether to search for users in the entire subtree of the base DN or to only search one level under the base DN (if false)","type":"boolean"},"usernameAttr":{"description":"The name of the attribute of the LDAP user element which should be selected as the 
username","type":"string"}}},"role.Role":{"id":"role.Role","required":["id","name","system_role","operations"],"properties":{"id":{"type":"string"},"name":{"type":"string"},"operations":{"type":"object"},"system_role":{"type":"boolean"}}},"types.ClusterConfig":{"id":"types.ClusterConfig","required":["ControllerPort","KubeAPIServerPort","ProxyKubeAPIServerPort","SwarmPort","SwarmStrategy","DNS","DNSOpt","DNSSearch","KVTimeout","KVSnapshotCount","ProfilingEnabled","ExternalServiceLB","MetricsRetentionTime","MetricsScrapeInterval","ProxyMetricsScrapeInterval","RethinkDBCacheSize","ExcludeServerIdentityHeaders","CloudProvider","CNIInstallerURL","PodCIDR","CalicoMTU","IPIPMTU","UnmanagedCNI","NodePortRange","AzureIPCount","CustomKubeAPIServerFlags","CustomKubeControllerManagerFlags","CustomKubeletFlags","CustomKubeSchedulerFlags","LocalVolumeCollectionMapping","AuthKeySecretID","managerKubeReservedResources","workerKubeReservedResources"],"properties":{"AuthKeySecretID":{"type":"string"},"AzureIPCount":{"type":"string"},"CNIInstallerURL":{"type":"string"},"CalicoMTU":{"type":"string"},"CloudProvider":{"type":"string"},"ControllerPort":{"type":"integer","format":"int32"},"CustomKubeAPIServerFlags":{"type":"array","items":{"type":"string"}},"CustomKubeControllerManagerFlags":{"type":"array","items":{"type":"string"}},"CustomKubeSchedulerFlags":{"type":"array","items":{"type":"string"}},"CustomKubeletFlags":{"type":"array","items":{"type":"string"}},"DNS":{"type":"array","items":{"type":"string"}},"DNSOpt":{"type":"array","items":{"type":"string"}},"DNSSearch":{"type":"array","items":{"type":"string"}},"ExcludeServerIdentityHeaders":{"type":"boolean"},"ExternalServiceLB":{"type":"string"},"IPIPMTU":{"type":"string"},"KVSnapshotCount":{"type":"integer","format":"int32"},"KVTimeout":{"type":"integer","format":"int32"},"KubeAPIServerPort":{"type":"integer","format":"int32"},"LocalVolumeCollectionMapping":{"type":"boolean"},"MetricsRetentionTime":{"type":"string"},"MetricsScrap
eInterval":{"type":"string"},"NodePortRange":{"type":"string"},"PodCIDR":{"type":"string"},"ProfilingEnabled":{"type":"boolean"},"ProxyKubeAPIServerPort":{"type":"integer","format":"int32"},"ProxyMetricsScrapeInterval":{"type":"string"},"RethinkDBCacheSize":{"type":"string"},"SwarmPort":{"type":"integer","format":"int32"},"SwarmStrategy":{"type":"string"},"UnmanagedCNI":{"type":"boolean"},"managerKubeReservedResources":{"type":"string"},"workerKubeReservedResources":{"type":"string"}}},"v1.FinalizerName":{"id":"v1.FinalizerName"},"v1.Initializer":{"id":"v1.Initializer","description":"Initializer is information about an initializer that has not yet completed.","required":["name"],"properties":{"name":{"description":"name of the process that is responsible for initializing this object.","type":"string"}}},"v1.Initializers":{"id":"v1.Initializers","description":"Initializers tracks the progress of initialization.","required":["pending"],"properties":{"pending":{"description":"Pending is a list of initializers that must execute in order before this object is visible. When the last pending initializer is removed, and no failing result is set, the initializers struct will be set to nil and the object is considered as initialized and visible to all clients.","type":"array","items":{"$ref":"#/definitions/v1.Initializer"}},"result":{"description":"If result is set with the Failure field, the object will be persisted to storage and then deleted, ensuring that other clients can observe the deletion.","$ref":"#/definitions/v1.Status"}}},"v1.ListMeta":{"id":"v1.ListMeta","description":"ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.","properties":{"continue":{"description":"continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. 
The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response.","type":"string"},"resourceVersion":{"description":"String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency","type":"string"},"selfLink":{"description":"selfLink is a URL representing this object. Populated by the system. Read-only.","type":"string"}}},"v1.Namespace":{"id":"v1.Namespace","description":"Namespace provides a scope for Names. Use of multiple namespaces is optional.","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources","type":"string"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds","type":"string"},"metadata":{"description":"Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata","$ref":"#/definitions/v1.ObjectMeta"},"spec":{"description":"Spec defines the behavior of the Namespace. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status","$ref":"#/definitions/v1.NamespaceSpec"},"status":{"description":"Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status","$ref":"#/definitions/v1.NamespaceStatus"}}},"v1.NamespaceList":{"id":"v1.NamespaceList","description":"NamespaceList is a list of Namespaces.","required":["items"],"properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources","type":"string"},"items":{"description":"Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/","type":"array","items":{"$ref":"#/definitions/v1.Namespace"}},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds","type":"string"},"metadata":{"description":"Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds","$ref":"#/definitions/v1.ListMeta"}}},"v1.NamespaceSpec":{"id":"v1.NamespaceSpec","description":"NamespaceSpec describes the attributes on a Namespace.","properties":{"finalizers":{"description":"Finalizers is an opaque list of values that must be empty to permanently remove object from storage. 
More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/","type":"array","items":{"$ref":"#/definitions/v1.FinalizerName"}}}},"v1.NamespaceStatus":{"id":"v1.NamespaceStatus","description":"NamespaceStatus is information about the current status of a Namespace.","properties":{"phase":{"description":"Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/","type":"string"}}},"v1.ObjectMeta":{"id":"v1.ObjectMeta","description":"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.","properties":{"annotations":{"description":"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations","type":"object"},"clusterName":{"description":"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.","type":"string"},"creationTimestamp":{"description":"CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata","type":"string"},"deletionGracePeriodSeconds":{"description":"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only.","type":"integer","format":"int64"},"deletionTimestamp":{"description":"DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata","type":"string"},"finalizers":{"description":"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.","type":"array","items":{"type":"string"}},"generateName":{"description":"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency","type":"string"},"generation":{"description":"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.","type":"integer","format":"int64"},"initializers":{"description":"An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.","$ref":"#/definitions/v1.Initializers"},"labels":{"description":"Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels","type":"object"},"name":{"description":"Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names","type":"string"},"namespace":{"description":"Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces","type":"string"},"ownerReferences":{"description":"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.","type":"array","items":{"$ref":"#/definitions/v1.OwnerReference"}},"resourceVersion":{"description":"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency","type":"string"},"selfLink":{"description":"SelfLink is a URL representing this object. Populated by the system. 
Read-only.","type":"string"},"uid":{"description":"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids","type":"string"}}},"v1.OwnerReference":{"id":"v1.OwnerReference","description":"OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.","required":["apiVersion","kind","name","uid"],"properties":{"apiVersion":{"description":"API version of the referent.","type":"string"},"blockOwnerDeletion":{"description":"If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.","type":"boolean"},"controller":{"description":"If true, this reference points to the managing controller.","type":"boolean"},"kind":{"description":"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds","type":"string"},"name":{"description":"Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names","type":"string"},"uid":{"description":"UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids","type":"string"}}},"v1.Status":{"id":"v1.Status","description":"Status is a return value for calls that don't return other objects.","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources","type":"string"},"code":{"description":"Suggested HTTP return code for this status, 0 if not set.","type":"integer","format":"int32"},"details":{"description":"Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.","$ref":"#/definitions/v1.StatusDetails"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds","type":"string"},"message":{"description":"A human-readable description of the status of this operation.","type":"string"},"metadata":{"description":"Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds","$ref":"#/definitions/v1.ListMeta"},"reason":{"description":"A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.","type":"string"},"status":{"description":"Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status","type":"string"}}},"v1.StatusCause":{"id":"v1.StatusCause","description":"StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.","properties":{"field":{"description":"The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. 
Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"","type":"string"},"message":{"description":"A human-readable description of the cause of the error. This field may be presented as-is to a reader.","type":"string"},"reason":{"description":"A machine-readable description of the cause of the error. If this value is empty there is no information available.","type":"string"}}},"v1.StatusDetails":{"id":"v1.StatusDetails","description":"StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.","properties":{"causes":{"description":"The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.","type":"array","items":{"$ref":"#/definitions/v1.StatusCause"}},"group":{"description":"The group attribute of the resource associated with the status StatusReason.","type":"string"},"kind":{"description":"The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds","type":"string"},"name":{"description":"The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).","type":"string"},"retryAfterSeconds":{"description":"If specified, the time in seconds before the operation should be retried. 
Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.","type":"integer","format":"int32"},"uid":{"description":"UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids","type":"string"}}},"||authz.Collection":{"id":"||authz.Collection"},"||authz.RoleCreateResponse":{"id":"||authz.RoleCreateResponse"},"||role.Role":{"id":"||role.Role"}},"securityDefinitions":{"BearerToken":{"type":"apiKey","name":"Authorization","in":"header"}},"security":[{"BearerToken":[]}],"tags":[{"description":"Create and manage containers.\n","name":"Container","x-displayName":"Containers"},{"name":"Image","x-displayName":"Images"},{"description":"Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information.\n","name":"Network","x-displayName":"Networks"},{"description":"Create and manage persistent storage that can be attached to containers.\n","name":"Volume","x-displayName":"Volumes"},{"description":"Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information.\n\nTo exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`.\n","name":"Exec","x-displayName":"Exec"},{"description":"Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information.\n","name":"Swarm","x-displayName":"Swarm"},{"description":"Nodes are instances of the Engine participating in a swarm. 
Swarm mode must be enabled for these endpoints to work.\n","name":"Node","x-displayName":"Nodes"},{"description":"Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work.\n","name":"Service","x-displayName":"Services"},{"description":"A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work.\n","name":"Task","x-displayName":"Tasks"},{"description":"Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work.\n","name":"Secret","x-displayName":"Secrets"},{"description":"Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work.\n","name":"Config","x-displayName":"Configs"},{"name":"Plugin","x-displayName":"Plugins"},{"name":"System","x-displayName":"System"},{"description":"API endpoints which are specific to UCP","name":"UCP"}]}, dom_id: '#swagger-ui', validatorUrl: null, presets: [ diff --git a/reference/ucp/3.1/cli/install.md b/reference/ucp/3.1/cli/install.md index 5528f1cdc3..96979016cc 100644 --- a/reference/ucp/3.1/cli/install.md +++ b/reference/ucp/3.1/cli/install.md @@ -38,11 +38,13 @@ If you have SELinux policies enabled for your Docker install, you will need to use `docker container run --rm -it --security-opt label=disable ...` when running this command. +If you are installing on Azure, see [Install UCP on Azure](/ee/ucp/admin/install/install-on-azure/). + ## Options | Option | Description | |:-------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `--admin-password` | The UCP administrator password | +| `--admin-password` | The UCP administrator password. Must be at least 8 characters. 
| | `--admin-username` | The UCP administrator username | | `--binpack` | Set the Docker Swarm scheduler to binpack mode. Used for backwards compatibility | | `--cloud-provider` | The cloud provider for the cluster @@ -69,6 +71,7 @@ command. | `--kv-timeout` | Timeout in milliseconds for the key-value store | | `--license` | Add a license: e.g.` --license "$(cat license.lic)" ` | | `--pod-cidr` | Kubernetes cluster IP pool for the pods to allocated IPs from (Default: `192.168.0.0/16`) | +| `--service-cluster-ip-range` | Sets the subnet pool from which the IP for Services should be allocated (Default is `10.96.0.0/16`). | | `--preserve-certs` | Don't generate certificates if they already exist | | `--pull` | Pull UCP images: `always`, when `missing`, or `never` | | `--random` | Set the Docker Swarm scheduler to random mode. Used for backwards compatibility | diff --git a/reference/ucp/3.1/cli/upgrade.md b/reference/ucp/3.1/cli/upgrade.md index 8c849dce8a..de7d72ae24 100644 --- a/reference/ucp/3.1/cli/upgrade.md +++ b/reference/ucp/3.1/cli/upgrade.md @@ -45,4 +45,3 @@ healthy and that all nodes have been upgraded successfully. | `--pod-cidr` | Kubernetes cluster IP pool for the pods to allocated IP. The default IP pool is `192.168.0.0/16`. | | `--nodeport-range` | Allowed port range for Kubernetes services of type `NodePort`. The default port range is `32768-35535`. | | `--cloud-provider` | The cloud provider for the cluster | -| `--unmanaged-cni` | Flag to indicate if CNI provider is Calico and managed by UCP. Calico is the default CNI provider. The default value is `true` when using the default Calico CNI. 
| diff --git a/release-notes/docker-compose.md b/release-notes/docker-compose.md index 5021768df4..3cd367596a 100644 --- a/release-notes/docker-compose.md +++ b/release-notes/docker-compose.md @@ -5,7 +5,84 @@ keywords: release notes, compose toc_max: 2 --- -## 1.23.1 (2018-11-01) +## 1.24.0 +(2019-03-28) + +### Features + +- Added support for connecting to the Docker Engine using the `ssh` protocol. + +- Added an `--all` flag to `docker-compose ps` to include stopped one-off containers + in the command's output. + +- Added bash completion for `ps --all|-a`. + +- Added support for credential_spec. + +- Added `--parallel` to `docker build`'s options in `bash` and `zsh` completion. + +### Bug Fixes + +- Fixed a bug where some valid credential helpers weren't properly handled by Compose + when attempting to pull images from private registries. + +- Fixed an issue where the output of `docker-compose start` before containers were created + was misleading. + +- Compose will no longer accept whitespace in variable names sourced from environment files. + This matches the Docker CLI behavior. + +- Compose will now report a configuration error if a service attempts to declare + duplicate mount points in the volumes section. + +- Fixed an issue with the containerized version of Compose that prevented users from + writing to stdin during interactive sessions started by `run` or `exec`. + +- One-off containers started by `run` no longer adopt the restart policy of the service, + and are instead set to never restart. + +- Fixed an issue that caused some container events to not appear in the output of + the `docker-compose events` command. + +- Missing images will no longer stop the execution of `docker-compose down` commands. A warning is + now displayed instead. + +- Force `virtualenv` version for macOS CI. + +- Fixed merging of Compose files when network has `None` config. + +- Fixed `CTRL+C` issues by enabling `bootloader_ignore_signals` in `pyinstaller`. 
+ +- Bumped `docker-py` version to `3.7.2` to fix SSH and proxy configuration issues. + +- Fixed release script and some typos on release documentation. + +## 1.23.2 +(2018-11-28) + +### Bug Fixes + +- Reverted a 1.23.0 change that appended random strings to container names + created by `docker-compose up`, causing addressability issues. + > **Note**: Containers created by `docker-compose run` will continue to use + randomly generated names to avoid collisions during parallel runs. + +- Fixed an issue where some `dockerfile` paths would fail unexpectedly when + attempting to build on Windows. + +- Fixed a bug where build context URLs would fail to build on Windows. + +- Fixed a bug that caused `run` and `exec` commands to fail for some otherwise + accepted values of the `--host` parameter. + +- Fixed an issue where overrides for the `storage_opt` and `isolation` keys in + service definitions weren't properly applied. + +- Fixed a bug where some invalid Compose files would raise an uncaught + exception during validation. + +## 1.23.1 +(2018-11-01) ### Bug Fixes @@ -15,7 +92,8 @@ toc_max: 2 - Fixed an issue where the behavior of the `--project-directory` flag would vary depending on which subcommand was used. -## 1.23.0 (2018-10-30) +## 1.23.0 +(2018-10-30) ### Important note @@ -89,7 +167,8 @@ naming scheme accordingly before upgrading. - The `zsh` completion script has been updated with new options, and no longer suggests container names where service names are expected. -## 1.22.0 (2018-07-17) +## 1.22.0 +(2018-07-17) ### New features @@ -142,14 +221,16 @@ naming scheme accordingly before upgrading. 
- Fixed a bug that caused auth values in legacy `.dockercfg` files to be ignored - `docker-compose build` will no longer attempt to create image names starting with an invalid character -## 1.21.2 (2018-05-03) +## 1.21.2 +(2018-05-03) ### Bug Fixes - Fixed a bug where the ip_range attribute in IPAM configs was prevented from passing validation -## 1.21.1 (2018-04-27) +## 1.21.1 +(2018-04-27) ### Bug Fixes @@ -174,7 +255,8 @@ naming scheme accordingly before upgrading. elements with some v3.2 files, triggering errors at the Engine level during deployment. -## 1.21.0 (2018-04-11) +## 1.21.0 +(2018-04-11) ### New features @@ -237,7 +319,8 @@ naming scheme accordingly before upgrading. recognized as inexistent by Compose, interrupting otherwise valid operations. -## 1.20.0 (2018-03-20) +## 1.20.0 +(2018-03-20) ### New features @@ -323,7 +406,8 @@ naming scheme accordingly before upgrading. - Fixed an encoding bug when streaming build progress -## 1.19.0 (2018-02-07) +## 1.19.0 +(2018-02-07) ### Breaking changes @@ -415,7 +499,8 @@ naming scheme accordingly before upgrading. containing scalar types (number, boolean) now get automatically converted to strings -## 1.18.0 (2017-12-18) +## 1.18.0 +(2017-12-18) ### New features @@ -497,7 +582,8 @@ naming scheme accordingly before upgrading. - The CLI now explicit prevents using `-d` and `--timeout` together in `docker-compose up` -## 1.17.0 (2017-11-01) +## 1.17.0 +(2017-11-01) ### New features @@ -554,7 +640,8 @@ naming scheme accordingly before upgrading. - Fixed an issue where networks with identical names would sometimes be created when running `up` commands concurrently. -## 1.16.0 (2017-08-31) +## 1.16.0 +(2017-08-31) ### New features @@ -613,7 +700,8 @@ naming scheme accordingly before upgrading. 
- Fixed the output of `docker-compose config` when a port definition used `0` as the value for the published port -## 1.15.0 (2017-07-26) +## 1.15.0 +(2017-07-26) ### New features @@ -659,7 +747,8 @@ naming scheme accordingly before upgrading. - Fixed an issue preventing `up` operations on a previously created stack on Windows Engine. -## 1.14.0 (2017-06-19) +## 1.14.0 +(2017-06-19) ### New features @@ -713,7 +802,8 @@ naming scheme accordingly before upgrading. - Fixed a bug where the output of `docker-compose config` would sometimes contain invalid port definitions -## 1.13.0 (2017-05-02) +## 1.13.0 +(2017-05-02) ### Breaking changes @@ -768,7 +858,8 @@ naming scheme accordingly before upgrading. `volumes` would result in an invalid config state -## 1.12.0 (2017-04-04) +## 1.12.0 +(2017-04-04) ### New features @@ -864,7 +955,8 @@ naming scheme accordingly before upgrading. - Fixed an issue where Compose would not pick up on the value of COMPOSE_TLS_VERSION when used in combination with command-line TLS flags -## 1.11.2 (2017-02-17) +## 1.11.2 +(2017-02-17) ### Bug Fixes @@ -884,14 +976,16 @@ naming scheme accordingly before upgrading. - Fixed an issue where recursive wildcard patterns `**` were not being recognized in `.dockerignore` files. -## 1.11.1 (2017-02-09) +## 1.11.1 +(2017-02-09) ### Bug Fixes - Fixed a bug where the 3.1 file format was not being recognized as valid by the Compose parser -## 1.11.0 (2017-02-08) +## 1.11.0 +(2017-02-08) ### New Features @@ -914,7 +1008,8 @@ naming scheme accordingly before upgrading. - Fixed an issue where the `pid` entry in a service definition was being ignored when using multiple Compose files. -## 1.10.1 (2017-02-01) +## 1.10.1 +(2017-02-01) ### Bug Fixes @@ -936,7 +1031,8 @@ naming scheme accordingly before upgrading. 
- Fixed a bug where Compose would occasionally crash while streaming logs when containers would stop or restart -## 1.10.0 (2017-01-18) +## 1.10.0 +(2017-01-18) ### New Features @@ -983,7 +1079,8 @@ naming scheme accordingly before upgrading. being parsed correctly on Windows -## 1.9.0 (2016-11-16) +## 1.9.0 +(2016-11-16) **Breaking changes** @@ -1040,7 +1137,8 @@ naming scheme accordingly before upgrading. mismatch for overlay networks. -## 1.8.1 (2016-09-22) +## 1.8.1 +(2016-09-22) ### Bug Fixes @@ -1081,7 +1179,8 @@ naming scheme accordingly before upgrading. a connection timeout. -## 1.8.0 (2016-06-14) +## 1.8.0 +(2016-06-14) ### Breaking Changes @@ -1142,7 +1241,8 @@ naming scheme accordingly before upgrading. descriptive error messages when something goes wrong. -## 1.7.1 (2016-05-04) +## 1.7.1 +(2016-05-04) ### Bug Fixes @@ -1182,7 +1282,8 @@ naming scheme accordingly before upgrading. location as the Compose file. -## 1.7.0 (2016-04-13) +## 1.7.0 +(2016-04-13) ### Breaking Changes @@ -1269,12 +1370,14 @@ naming scheme accordingly before upgrading. - Fixed a bug where empty values for build args would cause file validation to fail. -## 1.6.2 (2016-02-23) +## 1.6.2 +(2016-02-23) - Fixed a bug where connecting to a TLS-enabled Docker Engine would fail with a certificate verification error. -## 1.6.1 (2016-02-23) +## 1.6.1 +(2016-02-23) ### Bug Fixes @@ -1328,7 +1431,8 @@ naming scheme accordingly before upgrading. as a value in a mapping. -## 1.6.0 (2016-01-15) +## 1.6.0 +(2016-01-15) ### Major Features: @@ -1444,7 +1548,8 @@ naming scheme accordingly before upgrading. non-standard logging driver (or none at all). -## 1.5.2 (2015-12-03) +## 1.5.2 +(2015-12-03) - Fixed a bug which broke the use of `environment` and `env_file` with `extends`, and caused environment keys without values to have a `None` @@ -1465,7 +1570,8 @@ naming scheme accordingly before upgrading. 
- Improved the validation of the `expose` option -### 1.5.1 (2015-11-12) +## 1.5.1 +(2015-11-12) - Add the `--force-rm` option to `build`. @@ -1517,7 +1623,8 @@ naming scheme accordingly before upgrading. error message. -## 1.5.0 (2015-11-03) +## 1.5.0 +(2015-11-03) ### Breaking changes @@ -1617,12 +1724,14 @@ https://github.com/docker/compose/blob/8cc8e61/docs/compose-file.md#variable-sub - `docker-compose build` can now be run successfully against a Swarm cluster. -## 1.4.2 (2015-09-22) +## 1.4.2 +(2015-09-22) - Fixed a regression in the 1.4.1 release that would cause `docker-compose up` without the `-d` option to exit immediately. -## 1.4.1 (2015-09-10) +## 1.4.1 +(2015-09-10) ### Bug fixes @@ -1637,7 +1746,8 @@ https://github.com/docker/compose/blob/8cc8e61/docs/compose-file.md#variable-sub the configuration had not changed. -## 1.4.0 (2015-08-04) +## 1.4.0 +(2015-08-04) - By default, `docker-compose up` now only recreates containers for services whose configuration has changed since they were created. This should result in a dramatic speed-up for many applications. @@ -1675,14 +1785,16 @@ https://github.com/docker/compose/blob/8cc8e61/docs/compose-file.md#variable-sub Thanks @mnowster, @dnephin, @ekristen, @funkyfuture, @jeffk and @lukemarsden! -## 1.3.3 (2015-07-15) +## 1.3.3 +(2015-07-15) ### Regression fixes - When stopping containers gracefully, Compose was setting the timeout to 0, effectively forcing a SIGKILL every time. - Compose would sometimes crash depending on the formatting of container data returned from the Docker API. -## 1.3.2 (2015-07-14) +## 1.3.2 +(2015-07-14) ### Bug fixes @@ -1696,7 +1808,8 @@ Thanks @mnowster, @dnephin, @ekristen, @funkyfuture, @jeffk and @lukemarsden! Thanks @dano, @josephpage, @kevinsimper, @lieryan, @phemmer, @soulrebel and @sschepens! 
-## 1.3.1 (2015-06-21) +## 1.3.1 +(2015-06-21) ### Bug fixes @@ -1704,7 +1817,8 @@ Thanks @dano, @josephpage, @kevinsimper, @lieryan, @phemmer, @soulrebel and @ssc - `docker-compose help migrate-to-labels` failed with an error. - If no network mode was specified, Compose would set it to "bridge", rather than allowing the Docker daemon to use its configured default network mode. -## 1.3.0 (2015-06-18) +## 1.3.0 +(2015-06-18) ### Important notes @@ -1737,7 +1851,7 @@ Several new configuration keys have been added to `docker-compose.yml`: - `security_opt`, like `docker run --security-opt`, lets you specify [security options](https://docs.docker.com/engine/reference/run/#security-configuration). - `log_driver`, like `docker run --log-driver`, lets you specify a [log driver](https://docs.docker.com/engine/reference/run/#logging-drivers-log-driver). -### Bug fixes +### Bug Fixes - The output of `docker-compose run` was sometimes truncated, especially when running under Jenkins. - A service's volumes would sometimes not update after volume configuration was changed in `docker-compose.yml`. @@ -1748,7 +1862,8 @@ Several new configuration keys have been added to `docker-compose.yml`: Thanks @ahromis, @albers, @aleksandr-vin, @antoineco, @ccverak, @chernjie, @dnephin, @edmorley, @fordhurley, @josephpage, @KyleJamesWalker, @lsowen, @mchasal, @noironetworks, @sdake, @sdurrheimer, @sherter, @stephenlawrence, @thaJeztah, @thieman, @turtlemonvh, @twhiteman, @vdemeester, @xuxinkun and @zwily! -## 1.2.0 (2015-04-16) +## 1.2.0 +(2015-04-16) - `docker-compose.yml` now supports an `extends` option, which enables a service to inherit configuration from another service in another configuration file. This is really good for sharing common configuration between apps, or for configuring the same app for different environments. Here's the [documentation](https://github.com/docker/compose/blob/master/docs/yml.md#extends). 
@@ -1770,7 +1885,8 @@ Thanks @ahromis, @albers, @aleksandr-vin, @antoineco, @ccverak, @chernjie, @dnep Thanks, @abesto, @albers, @alunduil, @dnephin, @funkyfuture, @gilclark, @IanVS, @KingsleyKelly, @knutwalker, @thaJeztah and @vmalloc! -## 1.1.0 (2015-02-25) +## 1.1.0 +(2015-02-25) Fig has been renamed to Docker Compose, or just Compose for short. This has several implications for you: @@ -1802,13 +1918,15 @@ Besides that, there’s a lot of new stuff in this release: Thanks @dnephin, @squebe, @jbalonso, @raulcd, @benlangfield, @albers, @ggtools, @bersace, @dtenenba, @petercv, @drewkett, @TFenby, @paulRbr, @Aigeruth and @salehe! -## 1.0.1 (2014-11-04) +## 1.0.1 +(2014-11-04) - Added an `--allow-insecure-ssl` option to allow `fig up`, `fig run` and `fig pull` to pull from insecure registries. - Fixed `fig run` not showing output in Jenkins. - Fixed a bug where Fig couldn't build Dockerfiles with ADD statements pointing at URLs. -## 1.0.0 (2014-10-16) +## 1.0.0 +(2014-10-16) The highlights: @@ -1851,7 +1969,8 @@ Other things: Thanks @dnephin, @d11wtq, @marksteve, @rubbish, @jbalonso, @timfreund, @alunduil, @mieciu, @shuron, @moss, @suzaku and @chmouel! Whew. -## 0.5.2 (2014-07-28) +## 0.5.2 +(2014-07-28) - Added a `--no-cache` option to `fig build`, which bypasses the cache just like `docker build --no-cache`. - Fixed the `dns:` fig.yml option, which was causing fig to error out. @@ -1861,7 +1980,8 @@ Thanks @dnephin, @d11wtq, @marksteve, @rubbish, @jbalonso, @timfreund, @alunduil Thanks @dnephin and @marksteve! -## 0.5.1 (2014-07-11) +## 0.5.1 +(2014-07-11) - If a service has a command defined, `fig run [service]` with no further arguments will run it. - The project name now defaults to the directory containing fig.yml, not the current working directory (if they're different) @@ -1871,7 +1991,8 @@ Thanks @dnephin and @marksteve! Thanks @ryanbrainard and @d11wtq! -## 0.5.0 (2014-07-11) +## 0.5.0 +(2014-07-11) - Fig now starts links when you run `fig run` or `fig up`. 
@@ -1908,17 +2029,20 @@ Thanks @ryanbrainard and @d11wtq! Thanks to @d11wtq, @ryanbrainard, @rail44, @j0hnsmith, @binarin, @Elemecca, @mozz100 and @marksteve for their help with this release! -## 0.4.2 (2014-06-18) +## 0.4.2 +(2014-06-18) - Fix various encoding errors when using `fig run`, `fig up` and `fig build`. -## 0.4.1 (2014-05-08) +## 0.4.1 +(2014-05-08) - Add support for Docker 0.11.0. (Thanks @marksteve!) - Make project name configurable. (Thanks @jefmathiot!) - Return correct exit code from `fig run`. -## 0.4.0 (2014-04-29) +## 0.4.0 +(2014-04-29) - Support Docker 0.9 and 0.10 - Display progress bars correctly when pulling images (no more ski slopes) @@ -1929,18 +2053,21 @@ Thanks to @d11wtq, @ryanbrainard, @rail44, @j0hnsmith, @binarin, @Elemecca, @moz - Handle UTF-8 correctly when streaming `fig build/run/up` output (thanks @mauvm and @shanejonas!) - Error message improvements -## 0.3.2 (2014-03-05) +## 0.3.2 +(2014-03-05) - Added an `--rm` option to `fig run`. (Thanks @marksteve!) - Added an `expose` option to `fig.yml`. -## 0.3.1 (2014-03-04) +## 0.3.1 +(2014-03-04) - Added contribution instructions. (Thanks @kvz!) - Fixed `fig rm` throwing an error. - Fixed a bug in `fig ps` on Docker 0.8.1 when there is a container with no command. -## 0.3.0 (2014-03-03) +## 0.3.0 +(2014-03-03) - We now ship binaries for OS X and Linux. No more having to install with Pip! - Add `-f` flag to specify alternate `fig.yml` files @@ -1952,7 +2079,8 @@ Thanks to @d11wtq, @ryanbrainard, @rail44, @j0hnsmith, @binarin, @Elemecca, @moz Thanks @marksteve, @Gazler and @teozkr! -## 0.2.2 (2014-02-17) +## 0.2.2 +(2014-02-17) - Resolve dependencies using Cormen/Tarjan topological sort - Fix `fig up` not printing log output @@ -1961,11 +2089,13 @@ Thanks @marksteve, @Gazler and @teozkr! Thanks to @barnybug and @dustinlacewell for their work on this release. 
-## 0.2.1 (2014-02-04) +## 0.2.1 +(2014-02-04) - General improvements to error reporting (#77, #79) -## 0.2.0 (2014-01-31) +## 0.2.0 +(2014-01-31) - Link services to themselves so run commands can access the running service. (#67) - Much better documentation. @@ -1974,26 +2104,31 @@ Thanks to @barnybug and @dustinlacewell for their work on this release. Big thanks to @cameronmaske, @mrchrisadams and @damianmoore for their help with this release. -## 0.1.4 (2014-01-27) +## 0.1.4 +(2014-01-27) - Add a link alias without the project name. This makes the environment variables a little shorter: `REDIS_1_PORT_6379_TCP_ADDR`. (#54) -## 0.1.3 (2014-01-23) +## 0.1.3 +(2014-01-23) - Fix ports sometimes being configured incorrectly. (#46) - Fix log output sometimes not displaying. (#47) -## 0.1.2 (2014-01-22) +## 0.1.2 +(2014-01-22) - Add `-T` option to `fig run` to disable pseudo-TTY. (#34) - Fix `fig up` requiring the ubuntu image to be pulled to recreate containers. (#33) Thanks @cameronmaske! - Improve reliability, fix arrow keys and fix a race condition in `fig run`. (#34, #39, #40) -## 0.1.1 (2014-01-17) +## 0.1.1 +(2014-01-17) - Fix bug where ports were not exposed correctly (#29). Thanks @dustinlacewell! -## 0.1.0 (2014-01-16) +## 0.1.0 +(2014-01-16) - Containers are recreated on each `fig up`, ensuring config is up-to-date with `fig.yml` (#2) - Add `fig scale` command (#9) @@ -2007,7 +2142,8 @@ Big thanks to @cameronmaske, @mrchrisadams and @damianmoore for their help with Big thanks to @tomstuart, @EnTeQuAk, @schickling, @aronasorman and @GeoffreyPlitt. -## 0.0.2 (2014-01-02) +## 0.0.2 +(2014-01-02) - Improve documentation - Try to connect to Docker on `tcp://localdocker:4243` and a UNIX socket in addition to `localhost`. @@ -2015,6 +2151,7 @@ Big thanks to @tomstuart, @EnTeQuAk, @schickling, @aronasorman and @GeoffreyPlit - Add confirmation prompt to `fig rm` - Add `fig build` command -## 0.0.1 (2013-12-20) +## 0.0.1 +(2013-12-20) Initial release. 
diff --git a/release-notes/docker-engine.md b/release-notes/docker-engine.md index aee826f57a..ff3c019650 100644 --- a/release-notes/docker-engine.md +++ b/release-notes/docker-engine.md @@ -3,6 +3,11 @@ title: Docker Engine release notes description: Release notes for Docker CE keywords: release notes, community toc_max: 2 +redirect_from: + - /cs-engine/1.12/release-notes/ + - /cs-engine/1.12/release-notes/release-notes/ + - /cs-engine/1.12/release-notes/prior-release-notes/ + - /cs-engine/1.13/release-notes/ --- These release notes are for Docker Engine versions 1.13.1 and lower. For newer diff --git a/samples/index.md b/samples/index.md index e61e83be3a..37f9044ad7 100644 --- a/samples/index.md +++ b/samples/index.md @@ -39,6 +39,7 @@ Run popular software using Docker. | Sample | Description | | ------ | ----------- | | [apt-cacher-ng](/engine/examples/apt-cacher-ng) | Run a Dockerized apt-cacher-ng instance. | +| [.NET Core application](/engine/examples/dotnetcore) | Run a Dockerized ASP.NET Core application. | | [ASP.NET Core + SQL Server on Linux](/compose/aspnet-mssql-compose) | Run a Dockerized ASP.NET Core + SQL Server environment. | | [CouchDB](/engine/examples/couchdb_data_volumes) | Run a Dockerized CouchDB instance. | | [Django + PostgreSQL](/compose/django/) | Run a Dockerized Django + PostgreSQL environment. | diff --git a/storage/bind-mounts.md b/storage/bind-mounts.md index c8d69fd266..d20ca5f742 100644 --- a/storage/bind-mounts.md +++ b/storage/bind-mounts.md @@ -23,7 +23,7 @@ manage bind mounts. ![bind mounts on the Docker host](images/types-of-mounts-bind.png) -## Choosing the -v or --mount flag +## Choose the -v or --mount flag Originally, the `-v` or `--volume` flag was used for standalone containers and the `--mount` flag was used for swarm services. 
However, starting with Docker @@ -159,7 +159,7 @@ $ docker container stop devtest $ docker container rm devtest ``` -### Mounting into a non-empty directory on the container +### Mount into a non-empty directory on the container If you bind-mount into a non-empty directory on the container, the directory's existing contents are obscured by the bind mount. This can be beneficial, diff --git a/storage/index.md b/storage/index.md index 47a8d076b7..a82609311a 100644 --- a/storage/index.md +++ b/storage/index.md @@ -100,7 +100,7 @@ mounts is to think about where the data lives on the Docker host. information. For instance, internally, swarm services use `tmpfs` mounts to mount [secrets](/engine/swarm/secrets.md) into a service's containers. -Bind mounts and volumes can both mounted into containers using the `-v` or +Bind mounts and volumes can both be mounted into containers using the `-v` or `--volume` flag, but the syntax for each is slightly different. For `tmpfs` mounts, you can use the `--tmpfs` flag. However, in Docker 17.06 and higher, we recommend using the `--mount` flag for both containers and services, for diff --git a/storage/storagedriver/aufs-driver.md b/storage/storagedriver/aufs-driver.md index a184f4989b..d5b17d284b 100644 --- a/storage/storagedriver/aufs-driver.md +++ b/storage/storagedriver/aufs-driver.md @@ -168,7 +168,7 @@ Consider some scenarios where files in a container are modified. However, AUFS works at the file level rather than the block level. This means that all copy_up operations copy the entire file, even if the file is very large and only a small part of it is being modified. This can have a - noticeable impact on container write performance. AUFS, which can suffer + noticeable impact on container write performance. AUFS can suffer noticeable latencies when searching for files in images with many layers. However, it is worth noting that the copy_up operation only occurs the first time a given file is written to. 
Subsequent writes to the same file operate diff --git a/storage/storagedriver/overlayfs-driver.md b/storage/storagedriver/overlayfs-driver.md index a8404d1c67..b602852b4e 100644 --- a/storage/storagedriver/overlayfs-driver.md +++ b/storage/storagedriver/overlayfs-driver.md @@ -1,6 +1,6 @@ --- description: Learn how to optimize your use of OverlayFS driver. -keywords: container, storage, driver, OverlayFS +keywords: container, storage, driver, OverlayFS, overlay2, overlay title: Use the OverlayFS storage driver redirect_from: - /engine/userguide/storagedriver/overlayfs-driver/ @@ -16,8 +16,7 @@ storage driver as `overlay` or `overlay2`. > **Note**: If you use OverlayFS, use the `overlay2` driver rather than the > `overlay` driver, because it is more efficient in terms of inode utilization. > To use the new driver, you need version 4.0 or higher of the Linux kernel, -> or RHEL or CentOS using version 3.10.0-514 and above. When using Docker EE -> on RHEL or CentOS, you will need to follow some extra steps. +> or RHEL or CentOS using version 3.10.0-514 and above. > > For more information about differences between `overlay` vs `overlay2`, check > [Docker storage drivers](select-storage-driver.md). @@ -26,21 +25,15 @@ storage driver as `overlay` or `overlay2`. OverlayFS is supported if you meet the following prerequisites: -- The `overlay2` driver is supported for Docker EE 17.06.02-ee5 and later and - recommended for Docker CE. - -- The `overlay` driver is allowed but not recommended for Docker CE. - +- The `overlay2` driver is supported on Docker CE, and Docker EE 17.06.02-ee5 and + up, and is the recommended storage driver. - Version 4.0 or higher of the Linux kernel, or RHEL or CentOS using - version 3.10.0-514 of the kernel or higher. Docker EE users using kernels older - than 4.0 need to follow some extra steps, outlined below. - If you use an older kernel, you need to use the `overlay` driver, which is not - recommended. 
+ version 3.10.0-514 of the kernel or higher. If you use an older kernel, you need + to use the `overlay` driver, which is not recommended. +- The `overlay` and `overlay2` drivers are supported on `xfs` backing filesystems, + but only with `d_type=true` enabled. -- The following backing filesystems are supported: - - `ext4` (RHEL 7.1 only) - - `xfs` (RHEL 7.2 and higher), but only with `d_type=true` enabled. Use - `xfs_info` to verify that the `ftype` option is set to `1`. To format an + Use `xfs_info` to verify that the `ftype` option is set to `1`. To format an `xfs` filesystem correctly, use the flag `-n ftype=1`. > **Warning**: Running on XFS without d_type support now causes Docker to @@ -52,8 +45,8 @@ OverlayFS is supported if you meet the following prerequisites: - Changing the storage driver makes existing containers and images inaccessible on the local system. Use `docker save` to save any images you have built or - push them to Docker Hub or a private registry, so that you do not need to - re-create them later. + push them to Docker Hub or a private registry before changing the storage driver, + so that you do not need to re-create them later. ## Configure Docker with the `overlay` or `overlay2` storage driver @@ -70,6 +63,9 @@ be 4.0 or newer. Before following this procedure, you must first meet all the [prerequisites](#prerequisites). +The steps below outline how to configure the `overlay2` storage driver. If you +need to use the legacy `overlay` driver, specify it instead. + 1. Stop Docker. @@ -96,34 +92,6 @@ Before following this procedure, you must first meet all the } ``` - > **Note**: RHEL and CentOS users on Docker EE 17.06.02-ee5 and 17.06.02-ee6 - > - > You need to add a second option to the `daemon.json` to disable the check - > for version 4.0 or higher of the Linux kernel. Your `daemon.json` should - > look like the following. 
**This is only needed for Docker EE users of RHEL - > or CentOS.** Do not attempt to use `overlay2` with kernel versions older - > than 3.10.0-514. - > - > ```json - > { - > "storage-driver": "overlay2", - > "storage-opts": [ - > "overlay2.override_kernel_check=true" - > ] - > } - > ``` - > On kernel versions that support it, Docker EE versions 17.06.02-ee7 and - > later enable `overlay2` by default and do not require - > `override_kernel_check`. - - If you need to use the legacy `overlay` driver, specify it instead. - - More storage options are available. See all storage options for each storage - driver: - - - [Stable](/engine/reference/commandline/dockerd.md#storage-driver-options) - - [Edge](/edge/engine/reference/commandline/dockerd.md#storage-driver-options) - Docker does not start if the `daemon.json` file contains badly-formed JSON. 5. Start Docker. @@ -132,7 +100,7 @@ Before following this procedure, you must first meet all the $ sudo systemctl start docker ``` -4. Verify that the daemon is using the `overlay`/`overlay2` storage driver. +4. Verify that the daemon is using the `overlay2` storage driver. Use the `docker info` command and look for `Storage Driver` and `Backing filesystem`. @@ -142,12 +110,14 @@ Before following this procedure, you must first meet all the Containers: 0 Images: 0 Storage Driver: overlay2 - Backing Filesystem: extfs + Backing Filesystem: xfs + Supports d_type: true + Native Overlay Diff: true ``` -Docker is now using the `overlay2` storage driver. Docker has automatically -created the `overlay` mount with the required `lowerdir`, `upperdir`, `merged`, +Docker is now using the `overlay2` storage driver and has automatically +created the overlay mount with the required `lowerdir`, `upperdir`, `merged`, and `workdir` constructs. Continue reading for details about how OverlayFS works within your Docker @@ -165,9 +135,7 @@ process is referred to as a _union mount_. 
OverlayFS refers to the lower directo as `lowerdir` and the upper directory a `upperdir`. The unified view is exposed through its own directory called `merged`. -While the `overlay` driver only works with a single lower OverlayFS layer and -hence requires hard links for implementation of multi-layered images, the -`overlay2` driver natively supports up to 128 lower OverlayFS layers. This +The `overlay2` driver natively supports up to 128 lower OverlayFS layers. This capability provides better performance for layer-related Docker commands such as `docker build` and `docker commit`, and consumes fewer inodes on the backing filesystem. @@ -289,8 +257,10 @@ The `overlay` driver only works with two layers. This means that multi-layered images cannot be implemented as multiple OverlayFS layers. Instead, each image layer is implemented as its own directory under `/var/lib/docker/overlay`. Hard links are then used as a space-efficient way to reference data shared with lower -layers. As of Docker 1.10, image layer IDs no longer correspond to directory -names in `/var/lib/docker/`. +layers. The use of hardlinks causes an excessive use of inodes, which is a known +limitation of the legacy `overlay` storage driver, and may require additional +configuration of the backing filesystem. Refer to the [overlayFS and Docker +performance](#overlayfs-and-docker-performance) for details. To create a container, the `overlay` driver combines the directory representing the image's top layer plus a new directory for the container. The image's top @@ -490,7 +460,7 @@ Both `overlay2` and `overlay` drivers are more performant than `aufs` and far larger latencies if searching through many AUFS layers. `overlay2` supports multiple layers as well, but mitigates any performance hit with caching. -- **Inode limits**. Use of the `overlay` storage driver can cause excessive +- **Inode limits**. Use of the legacy `overlay` storage driver can cause excessive inode consumption. 
This is especially true in the presence of a large number of images and containers on the Docker host. The only way to increase the number of inodes available to a filesystem is to reformat it. To avoid running @@ -509,8 +479,8 @@ The following generic performance best practices also apply to OverlayFS. predictable performance for write-heavy workloads. This is because they bypass the storage driver and do not incur any of the potential overheads introduced by thin provisioning and copy-on-write. Volumes have other benefits, such as - allowing you to share data among containers and persisting even when no - running container is using them. + allowing you to share data among containers and persisting your data even if + no running container is using them. ## Limitations on OverlayFS compatibility To summarize the OverlayFS's aspect which is incompatible with other diff --git a/storage/storagedriver/vfs-driver.md b/storage/storagedriver/vfs-driver.md index 6c0907d545..206f668c1f 100644 --- a/storage/storagedriver/vfs-driver.md +++ b/storage/storagedriver/vfs-driver.md @@ -13,8 +13,6 @@ performance and more space used on disk than other storage drivers. However, it is robust, stable, and works in every environment. It can also be used as a mechanism to verify other storage back-ends against, in a testing environment. -Docker 17.12 and higher include support for quotas when using the VFS driver. - ## Configure Docker with the `vfs` storage driver 1. Stop Docker. @@ -33,11 +31,12 @@ Docker 17.12 and higher include support for quotas when using the VFS driver. ``` If you want to set a quota to control the maximum size the VFS storage - driver can use, set the `size` option on the `storage-drivers` key. Quotas - are only supported in Docker 17.12 CE and higher. + driver can use, set the `size` option on the `storage-opts` key. Quotas + are only supported in Docker 17.12 and higher. 
```json { + "storage-driver": "vfs", "storage-opts": ["size=256M"] } ``` @@ -51,14 +50,13 @@ Docker 17.12 and higher include support for quotas when using the VFS driver. ``` 4. Verify that the daemon is using the `vfs` storage driver. - Use the `docker info` command and look for `Storage Driver` and - `Backing filesystem`. + Use the `docker info` command and look for `Storage Driver`. ```bash $ docker info Storage Driver: vfs - + ... ``` Docker is now using the `vfs` storage driver. Docker has automatically diff --git a/storage/tmpfs.md b/storage/tmpfs.md index faf1326012..a7ffe16b90 100644 --- a/storage/tmpfs.md +++ b/storage/tmpfs.md @@ -29,7 +29,7 @@ persist in either the host or the container writable layer. containers. * This functionality is only available if you're running Docker on Linux. -## Choosing the --tmpfs or --mount flag +## Choose the --tmpfs or --mount flag Originally, the `--tmpfs` flag was used for standalone containers and the `--mount` flag was used for swarm services. However, starting with Docker diff --git a/storage/volumes.md b/storage/volumes.md index 58060b651c..1c5b42c570 100644 --- a/storage/volumes.md +++ b/storage/volumes.md @@ -447,6 +447,26 @@ $ docker run -d \ nginx:latest ``` +### Create a service which creates an NFS volume + +This example shows how you can create an NFS volume when creating a service. This example uses `10.0.0.10` as the NFS server and `/var/docker-nfs` as the exported directory on the NFS server. Note that the volume driver specified is `local`. 
+ +#### NFSv3 +```bash +$ docker service create -d \ + --name nfs-service \ + --mount 'type=volume,source=nfsvolume,target=/app,volume-driver=local,volume-opt=type=nfs,volume-opt=device=:/var/docker-nfs,volume-opt=o=addr=10.0.0.10' \ + nginx:latest +``` + +#### NFSv4 +```bash +$ docker service create -d \ + --name nfs-service \ + --mount 'type=volume,source=nfsvolume,target=/app,volume-driver=local,volume-opt=type=nfs,volume-opt=device=:/,"volume-opt=o=10.0.0.10,rw,nfsvers=4,async"' \ + nginx:latest +``` + ## Backup, restore, or migrate data volumes Volumes are useful for backups, restores, and migrations. Use the @@ -492,7 +512,7 @@ testing using your preferred tools. A Docker data volume persists after a container is deleted. There are two types of volumes to consider: -- **Named volumes** have a specific source form outside the container, for example `awesome:/bar`. +- **Named volumes** have a specific source from outside the container, for example `awesome:/bar`. - **Anonymous volumes** have no specific source so when the container is deleted, instruct the Docker Engine daemon to remove them. ### Remove anonymous volumes diff --git a/swarm/configure-tls.md b/swarm/configure-tls.md index eb18596ade..3034ce94ad 100644 --- a/swarm/configure-tls.md +++ b/swarm/configure-tls.md @@ -543,7 +543,7 @@ do this for the `ubuntu` user on your Docker Engine client. Congratulations! You have configured a Docker swarm cluster to use TLS. -## Related Information +## Related information * [Secure Docker Swarm with TLS](secure-swarm-tls.md) * [Docker security](/engine/security/security/) diff --git a/toolbox/toolbox_install_windows.md b/toolbox/toolbox_install_windows.md index dc808e9c57..c0491b09b5 100644 --- a/toolbox/toolbox_install_windows.md +++ b/toolbox/toolbox_install_windows.md @@ -10,10 +10,6 @@ Windows systems that do not meet minimal system requirements for the [Docker Desktop for Windows](/docker-for-windows/index.md) app. 
-If you have not done so already, download the installer here: - -[Get Docker Toolbox for Windows](https://download.docker.com/win/stable/DockerToolbox.exe){: class="button outline-btn" } - ## What you get and how it works Docker Toolbox includes the following Docker tools: @@ -103,6 +99,8 @@ installer. 1. Go to the [Docker Toolbox](https://www.docker.com/toolbox){: target="_blank" class="_" } page. +> **Note**: To continue with the latest version of Docker Toolbox, see https://github.com/docker/toolbox/releases for information about downloading the latest version and related components. If you choose to download from this location, an unsigned warning (verified publisher dialog) is displayed. + 2. Click the installer link to download. 3. Install Docker Toolbox by double-clicking the installer.